##// END OF EJS Templates
wireprotov2: define and implement "changesetdata" command...
Gregory Szorc -
r39666:9c2c77c7 default
parent child Browse files
Show More
@@ -0,0 +1,486
1 $ . $TESTDIR/wireprotohelpers.sh
2
3 $ hg init server
4 $ enablehttpv2 server
5 $ cd server
6 $ echo a0 > a
7 $ echo b0 > b
8
9 $ hg -q commit -A -m 'commit 0'
10
11 $ echo a1 > a
12 $ echo b1 > b
13 $ hg commit -m 'commit 1'
14 $ echo b2 > b
15 $ hg commit -m 'commit 2'
16
17 $ hg -q up -r 0
18 $ echo a2 > a
19 $ hg commit -m 'commit 3'
20 created new head
21
22 $ hg log -G -T '{rev}:{node} {desc}\n'
23 @ 3:eae5f82c2e622368d27daecb76b7e393d0f24211 commit 3
24 |
25 | o 2:0bb8ad894a15b15380b2a2a5b183e20f2a4b28dd commit 2
26 | |
27 | o 1:7592917e1c3e82677cb0a4bc715ca25dd12d28c1 commit 1
28 |/
29 o 0:3390ef850073fbc2f0dfff2244342c8e9229013a commit 0
30
31
32 $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
33 $ cat hg.pid > $DAEMON_PIDS
34
35 No arguments is an invalid request
36
37 $ sendhttpv2peer << EOF
38 > command changesetdata
39 > EOF
40 creating http peer for wire protocol version 2
41 sending changesetdata command
42 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
43 s> Accept-Encoding: identity\r\n
44 s> accept: application/mercurial-exp-framing-0005\r\n
45 s> content-type: application/mercurial-exp-framing-0005\r\n
46 s> content-length: 28\r\n
47 s> host: $LOCALIP:$HGPORT\r\n (glob)
48 s> user-agent: Mercurial debugwireproto\r\n
49 s> \r\n
50 s> \x14\x00\x00\x01\x00\x01\x01\x11\xa1DnameMchangesetdata
51 s> makefile('rb', None)
52 s> HTTP/1.1 200 OK\r\n
53 s> Server: testing stub value\r\n
54 s> Date: $HTTP_DATE$\r\n
55 s> Content-Type: application/mercurial-exp-framing-0005\r\n
56 s> Transfer-Encoding: chunked\r\n
57 s> \r\n
58 s> 49\r\n
59 s> A\x00\x00\x01\x00\x02\x012
60 s> \xa2Eerror\xa1GmessageX"noderange or nodes must be definedFstatusEerror
61 s> \r\n
62 received frame(size=65; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
63 s> 0\r\n
64 s> \r\n
65 abort: noderange or nodes must be defined!
66 [255]
67
68 Empty noderange heads results in an error
69
70 $ sendhttpv2peer << EOF
71 > command changesetdata
72 > noderange eval:[[],[]]
73 > EOF
74 creating http peer for wire protocol version 2
75 sending changesetdata command
76 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
77 s> Accept-Encoding: identity\r\n
78 s> accept: application/mercurial-exp-framing-0005\r\n
79 s> content-type: application/mercurial-exp-framing-0005\r\n
80 s> content-length: 47\r\n
81 s> host: $LOCALIP:$HGPORT\r\n (glob)
82 s> user-agent: Mercurial debugwireproto\r\n
83 s> \r\n
84 s> \'\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1Inoderange\x82\x80\x80DnameMchangesetdata
85 s> makefile('rb', None)
86 s> HTTP/1.1 200 OK\r\n
87 s> Server: testing stub value\r\n
88 s> Date: $HTTP_DATE$\r\n
89 s> Content-Type: application/mercurial-exp-framing-0005\r\n
90 s> Transfer-Encoding: chunked\r\n
91 s> \r\n
92 s> 51\r\n
93 s> I\x00\x00\x01\x00\x02\x012
94 s> \xa2Eerror\xa1GmessageX*heads in noderange request cannot be emptyFstatusEerror
95 s> \r\n
96 received frame(size=73; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=eos)
97 s> 0\r\n
98 s> \r\n
99 abort: heads in noderange request cannot be empty!
100 [255]
101
102 Sending just noderange heads sends all revisions
103
104 $ sendhttpv2peer << EOF
105 > command changesetdata
106 > noderange eval:[[], [b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd', b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11']]
107 > EOF
108 creating http peer for wire protocol version 2
109 sending changesetdata command
110 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
111 s> Accept-Encoding: identity\r\n
112 s> accept: application/mercurial-exp-framing-0005\r\n
113 s> content-type: application/mercurial-exp-framing-0005\r\n
114 s> content-length: 89\r\n
115 s> host: $LOCALIP:$HGPORT\r\n (glob)
116 s> user-agent: Mercurial debugwireproto\r\n
117 s> \r\n
118 s> Q\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1Inoderange\x82\x80\x82T\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xddT\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11DnameMchangesetdata
119 s> makefile('rb', None)
120 s> HTTP/1.1 200 OK\r\n
121 s> Server: testing stub value\r\n
122 s> Date: $HTTP_DATE$\r\n
123 s> Content-Type: application/mercurial-exp-framing-0005\r\n
124 s> Transfer-Encoding: chunked\r\n
125 s> \r\n
126 s> 13\r\n
127 s> \x0b\x00\x00\x01\x00\x02\x011
128 s> \xa1FstatusBok
129 s> \r\n
130 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
131 s> 81\r\n
132 s> y\x00\x00\x01\x00\x02\x001
133 s> \xa1Jtotalitems\x04\xa1DnodeT3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\xa1DnodeTu\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1\xa1DnodeT\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd\xa1DnodeT\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11
134 s> \r\n
135 received frame(size=121; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
136 s> 8\r\n
137 s> \x00\x00\x00\x01\x00\x02\x002
138 s> \r\n
139 s> 0\r\n
140 s> \r\n
141 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
142 response: gen[
143 {
144 b'totalitems': 4
145 },
146 {
147 b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
148 },
149 {
150 b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
151 },
152 {
153 b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
154 },
155 {
156 b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
157 }
158 ]
159
160 Sending root nodes limits what data is sent
161
162 $ sendhttpv2peer << EOF
163 > command changesetdata
164 > noderange eval:[[b'\x33\x90\xef\x85\x00\x73\xfb\xc2\xf0\xdf\xff\x22\x44\x34\x2c\x8e\x92\x29\x01\x3a'], [b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd']]
165 > EOF
166 creating http peer for wire protocol version 2
167 sending changesetdata command
168 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
169 s> Accept-Encoding: identity\r\n
170 s> accept: application/mercurial-exp-framing-0005\r\n
171 s> content-type: application/mercurial-exp-framing-0005\r\n
172 s> content-length: 89\r\n
173 s> host: $LOCALIP:$HGPORT\r\n (glob)
174 s> user-agent: Mercurial debugwireproto\r\n
175 s> \r\n
176 s> Q\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1Inoderange\x82\x81T3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:\x81T\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xddDnameMchangesetdata
177 s> makefile('rb', None)
178 s> HTTP/1.1 200 OK\r\n
179 s> Server: testing stub value\r\n
180 s> Date: $HTTP_DATE$\r\n
181 s> Content-Type: application/mercurial-exp-framing-0005\r\n
182 s> Transfer-Encoding: chunked\r\n
183 s> \r\n
184 s> 13\r\n
185 s> \x0b\x00\x00\x01\x00\x02\x011
186 s> \xa1FstatusBok
187 s> \r\n
188 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
189 s> 4b\r\n
190 s> C\x00\x00\x01\x00\x02\x001
191 s> \xa1Jtotalitems\x02\xa1DnodeTu\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1\xa1DnodeT\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd
192 s> \r\n
193 received frame(size=67; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
194 s> 8\r\n
195 s> \x00\x00\x00\x01\x00\x02\x002
196 s> \r\n
197 s> 0\r\n
198 s> \r\n
199 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
200 response: gen[
201 {
202 b'totalitems': 2
203 },
204 {
205 b'node': b'u\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1'
206 },
207 {
208 b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
209 }
210 ]
211
212 Requesting data on a single node by node works
213
214 $ sendhttpv2peer << EOF
215 > command changesetdata
216 > nodes eval:[b'\x33\x90\xef\x85\x00\x73\xfb\xc2\xf0\xdf\xff\x22\x44\x34\x2c\x8e\x92\x29\x01\x3a']
217 > EOF
218 creating http peer for wire protocol version 2
219 sending changesetdata command
220 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
221 s> Accept-Encoding: identity\r\n
222 s> accept: application/mercurial-exp-framing-0005\r\n
223 s> content-type: application/mercurial-exp-framing-0005\r\n
224 s> content-length: 62\r\n
225 s> host: $LOCALIP:$HGPORT\r\n (glob)
226 s> user-agent: Mercurial debugwireproto\r\n
227 s> \r\n
228 s> 6\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa1Enodes\x81T3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:DnameMchangesetdata
229 s> makefile('rb', None)
230 s> HTTP/1.1 200 OK\r\n
231 s> Server: testing stub value\r\n
232 s> Date: $HTTP_DATE$\r\n
233 s> Content-Type: application/mercurial-exp-framing-0005\r\n
234 s> Transfer-Encoding: chunked\r\n
235 s> \r\n
236 s> 13\r\n
237 s> \x0b\x00\x00\x01\x00\x02\x011
238 s> \xa1FstatusBok
239 s> \r\n
240 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
241 s> 30\r\n
242 s> (\x00\x00\x01\x00\x02\x001
243 s> \xa1Jtotalitems\x01\xa1DnodeT3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:
244 s> \r\n
245 received frame(size=40; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
246 s> 8\r\n
247 s> \x00\x00\x00\x01\x00\x02\x002
248 s> \r\n
249 s> 0\r\n
250 s> \r\n
251 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
252 response: gen[
253 {
254 b'totalitems': 1
255 },
256 {
257 b'node': b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:'
258 }
259 ]
260
261 Specifying a noderange and nodes takes union
262
263 $ sendhttpv2peer << EOF
264 > command changesetdata
265 > noderange eval:[[b'\x75\x92\x91\x7e\x1c\x3e\x82\x67\x7c\xb0\xa4\xbc\x71\x5c\xa2\x5d\xd1\x2d\x28\xc1'], [b'\x0b\xb8\xad\x89\x4a\x15\xb1\x53\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f\x2a\x4b\x28\xdd']]
266 > nodes eval:[b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11']
267 > EOF
268 creating http peer for wire protocol version 2
269 sending changesetdata command
270 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
271 s> Accept-Encoding: identity\r\n
272 s> accept: application/mercurial-exp-framing-0005\r\n
273 s> content-type: application/mercurial-exp-framing-0005\r\n
274 s> content-length: 117\r\n
275 s> host: $LOCALIP:$HGPORT\r\n (glob)
276 s> user-agent: Mercurial debugwireproto\r\n
277 s> \r\n
278 s> m\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa2Inoderange\x82\x81Tu\x92\x91~\x1c>\x82g|\xb0\xa4\xbcq\\\xa2]\xd1-(\xc1\x81T\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xddEnodes\x81T\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11DnameMchangesetdata
279 s> makefile('rb', None)
280 s> HTTP/1.1 200 OK\r\n
281 s> Server: testing stub value\r\n
282 s> Date: $HTTP_DATE$\r\n
283 s> Content-Type: application/mercurial-exp-framing-0005\r\n
284 s> Transfer-Encoding: chunked\r\n
285 s> \r\n
286 s> 13\r\n
287 s> \x0b\x00\x00\x01\x00\x02\x011
288 s> \xa1FstatusBok
289 s> \r\n
290 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
291 s> 4b\r\n
292 s> C\x00\x00\x01\x00\x02\x001
293 s> \xa1Jtotalitems\x02\xa1DnodeT\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11\xa1DnodeT\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd
294 s> \r\n
295 received frame(size=67; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
296 s> 8\r\n
297 s> \x00\x00\x00\x01\x00\x02\x002
298 s> \r\n
299 s> 0\r\n
300 s> \r\n
301 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
302 response: gen[
303 {
304 b'totalitems': 2
305 },
306 {
307 b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11'
308 },
309 {
310 b'node': b'\x0b\xb8\xad\x89J\x15\xb1S\x80\xb2\xa2\xa5\xb1\x83\xe2\x0f*K(\xdd'
311 }
312 ]
313
314 Parents data is transferred upon request
315
316 $ sendhttpv2peer << EOF
317 > command changesetdata
318 > fields eval:[b'parents']
319 > nodes eval:[b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11']
320 > EOF
321 creating http peer for wire protocol version 2
322 sending changesetdata command
323 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
324 s> Accept-Encoding: identity\r\n
325 s> accept: application/mercurial-exp-framing-0005\r\n
326 s> content-type: application/mercurial-exp-framing-0005\r\n
327 s> content-length: 78\r\n
328 s> host: $LOCALIP:$HGPORT\r\n (glob)
329 s> user-agent: Mercurial debugwireproto\r\n
330 s> \r\n
331 s> F\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa2Ffields\x81GparentsEnodes\x81T\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11DnameMchangesetdata
332 s> makefile('rb', None)
333 s> HTTP/1.1 200 OK\r\n
334 s> Server: testing stub value\r\n
335 s> Date: $HTTP_DATE$\r\n
336 s> Content-Type: application/mercurial-exp-framing-0005\r\n
337 s> Transfer-Encoding: chunked\r\n
338 s> \r\n
339 s> 13\r\n
340 s> \x0b\x00\x00\x01\x00\x02\x011
341 s> \xa1FstatusBok
342 s> \r\n
343 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
344 s> 63\r\n
345 s> [\x00\x00\x01\x00\x02\x001
346 s> \xa1Jtotalitems\x01\xa2DnodeT\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11Gparents\x82T3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
347 s> \r\n
348 received frame(size=91; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
349 s> 8\r\n
350 s> \x00\x00\x00\x01\x00\x02\x002
351 s> \r\n
352 s> 0\r\n
353 s> \r\n
354 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
355 response: gen[
356 {
357 b'totalitems': 1
358 },
359 {
360 b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11',
361 b'parents': [
362 b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
363 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
364 ]
365 }
366 ]
367
368 Revision data is transferred upon request
369
370 $ sendhttpv2peer << EOF
371 > command changesetdata
372 > fields eval:[b'revision']
373 > nodes eval:[b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11']
374 > EOF
375 creating http peer for wire protocol version 2
376 sending changesetdata command
377 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
378 s> Accept-Encoding: identity\r\n
379 s> accept: application/mercurial-exp-framing-0005\r\n
380 s> content-type: application/mercurial-exp-framing-0005\r\n
381 s> content-length: 79\r\n
382 s> host: $LOCALIP:$HGPORT\r\n (glob)
383 s> user-agent: Mercurial debugwireproto\r\n
384 s> \r\n
385 s> G\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa2Ffields\x81HrevisionEnodes\x81T\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11DnameMchangesetdata
386 s> makefile('rb', None)
387 s> HTTP/1.1 200 OK\r\n
388 s> Server: testing stub value\r\n
389 s> Date: $HTTP_DATE$\r\n
390 s> Content-Type: application/mercurial-exp-framing-0005\r\n
391 s> Transfer-Encoding: chunked\r\n
392 s> \r\n
393 s> 13\r\n
394 s> \x0b\x00\x00\x01\x00\x02\x011
395 s> \xa1FstatusBok
396 s> \r\n
397 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
398 s> 7e\r\n
399 s> v\x00\x00\x01\x00\x02\x001
400 s> \xa1Jtotalitems\x01\xa2DnodeT\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11Lrevisionsize\x18=X=1b74476799ec8318045db759b1b4bcc9b839d0aa\n
401 s> test\n
402 s> 0 0\n
403 s> a\n
404 s> \n
405 s> commit 3
406 s> \r\n
407 received frame(size=118; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
408 s> 8\r\n
409 s> \x00\x00\x00\x01\x00\x02\x002
410 s> \r\n
411 s> 0\r\n
412 s> \r\n
413 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
414 response: gen[
415 {
416 b'totalitems': 1
417 },
418 {
419 b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11',
420 b'revisionsize': 61
421 },
422 b'1b74476799ec8318045db759b1b4bcc9b839d0aa\ntest\n0 0\na\n\ncommit 3'
423 ]
424
425 Multiple fields can be transferred
426
427 $ sendhttpv2peer << EOF
428 > command changesetdata
429 > fields eval:[b'parents', b'revision']
430 > nodes eval:[b'\xea\xe5\xf8\x2c\x2e\x62\x23\x68\xd2\x7d\xae\xcb\x76\xb7\xe3\x93\xd0\xf2\x42\x11']
431 > EOF
432 creating http peer for wire protocol version 2
433 sending changesetdata command
434 s> POST /api/exp-http-v2-0001/ro/changesetdata HTTP/1.1\r\n
435 s> Accept-Encoding: identity\r\n
436 s> accept: application/mercurial-exp-framing-0005\r\n
437 s> content-type: application/mercurial-exp-framing-0005\r\n
438 s> content-length: 87\r\n
439 s> host: $LOCALIP:$HGPORT\r\n (glob)
440 s> user-agent: Mercurial debugwireproto\r\n
441 s> \r\n
442 s> O\x00\x00\x01\x00\x01\x01\x11\xa2Dargs\xa2Ffields\x82GparentsHrevisionEnodes\x81T\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11DnameMchangesetdata
443 s> makefile('rb', None)
444 s> HTTP/1.1 200 OK\r\n
445 s> Server: testing stub value\r\n
446 s> Date: $HTTP_DATE$\r\n
447 s> Content-Type: application/mercurial-exp-framing-0005\r\n
448 s> Transfer-Encoding: chunked\r\n
449 s> \r\n
450 s> 13\r\n
451 s> \x0b\x00\x00\x01\x00\x02\x011
452 s> \xa1FstatusBok
453 s> \r\n
454 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
455 s> b1\r\n
456 s> \xa9\x00\x00\x01\x00\x02\x001
457 s> \xa1Jtotalitems\x01\xa3DnodeT\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11Gparents\x82T3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Lrevisionsize\x18=X=1b74476799ec8318045db759b1b4bcc9b839d0aa\n
458 s> test\n
459 s> 0 0\n
460 s> a\n
461 s> \n
462 s> commit 3
463 s> \r\n
464 received frame(size=169; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
465 s> 8\r\n
466 s> \x00\x00\x00\x01\x00\x02\x002
467 s> \r\n
468 s> 0\r\n
469 s> \r\n
470 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
471 response: gen[
472 {
473 b'totalitems': 1
474 },
475 {
476 b'node': b'\xea\xe5\xf8,.b#h\xd2}\xae\xcbv\xb7\xe3\x93\xd0\xf2B\x11',
477 b'parents': [
478 b'3\x90\xef\x85\x00s\xfb\xc2\xf0\xdf\xff"D4,\x8e\x92)\x01:',
479 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
480 ],
481 b'revisionsize': 61
482 },
483 b'1b74476799ec8318045db759b1b4bcc9b839d0aa\ntest\n0 0\na\n\ncommit 3'
484 ]
485
486 $ cat error.log
@@ -1,2649 +1,2649
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import hashlib
11 import hashlib
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 bin,
15 bin,
16 hex,
16 hex,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 )
19 )
20 from .thirdparty import (
20 from .thirdparty import (
21 attr,
21 attr,
22 )
22 )
23 from . import (
23 from . import (
24 bookmarks as bookmod,
24 bookmarks as bookmod,
25 bundle2,
25 bundle2,
26 changegroup,
26 changegroup,
27 discovery,
27 discovery,
28 error,
28 error,
29 exchangev2,
29 exchangev2,
30 lock as lockmod,
30 lock as lockmod,
31 logexchange,
31 logexchange,
32 narrowspec,
32 narrowspec,
33 obsolete,
33 obsolete,
34 phases,
34 phases,
35 pushkey,
35 pushkey,
36 pycompat,
36 pycompat,
37 repository,
37 repository,
38 scmutil,
38 scmutil,
39 sslutil,
39 sslutil,
40 streamclone,
40 streamclone,
41 url as urlmod,
41 url as urlmod,
42 util,
42 util,
43 )
43 )
44 from .utils import (
44 from .utils import (
45 stringutil,
45 stringutil,
46 )
46 )
47
47
48 urlerr = util.urlerr
48 urlerr = util.urlerr
49 urlreq = util.urlreq
49 urlreq = util.urlreq
50
50
51 _NARROWACL_SECTION = 'narrowhgacl'
51 _NARROWACL_SECTION = 'narrowhgacl'
52
52
53 # Maps bundle version human names to changegroup versions.
53 # Maps bundle version human names to changegroup versions.
54 _bundlespeccgversions = {'v1': '01',
54 _bundlespeccgversions = {'v1': '01',
55 'v2': '02',
55 'v2': '02',
56 'packed1': 's1',
56 'packed1': 's1',
57 'bundle2': '02', #legacy
57 'bundle2': '02', #legacy
58 }
58 }
59
59
60 # Maps bundle version with content opts to choose which part to bundle
60 # Maps bundle version with content opts to choose which part to bundle
61 _bundlespeccontentopts = {
61 _bundlespeccontentopts = {
62 'v1': {
62 'v1': {
63 'changegroup': True,
63 'changegroup': True,
64 'cg.version': '01',
64 'cg.version': '01',
65 'obsolescence': False,
65 'obsolescence': False,
66 'phases': False,
66 'phases': False,
67 'tagsfnodescache': False,
67 'tagsfnodescache': False,
68 'revbranchcache': False
68 'revbranchcache': False
69 },
69 },
70 'v2': {
70 'v2': {
71 'changegroup': True,
71 'changegroup': True,
72 'cg.version': '02',
72 'cg.version': '02',
73 'obsolescence': False,
73 'obsolescence': False,
74 'phases': False,
74 'phases': False,
75 'tagsfnodescache': True,
75 'tagsfnodescache': True,
76 'revbranchcache': True
76 'revbranchcache': True
77 },
77 },
78 'packed1' : {
78 'packed1' : {
79 'cg.version': 's1'
79 'cg.version': 's1'
80 }
80 }
81 }
81 }
82 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
82 _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']
83
83
84 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
84 _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
85 "tagsfnodescache": False,
85 "tagsfnodescache": False,
86 "revbranchcache": False}}
86 "revbranchcache": False}}
87
87
88 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
88 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
89 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
89 _bundlespecv1compengines = {'gzip', 'bzip2', 'none'}
90
90
91 @attr.s
91 @attr.s
92 class bundlespec(object):
92 class bundlespec(object):
93 compression = attr.ib()
93 compression = attr.ib()
94 wirecompression = attr.ib()
94 wirecompression = attr.ib()
95 version = attr.ib()
95 version = attr.ib()
96 wireversion = attr.ib()
96 wireversion = attr.ib()
97 params = attr.ib()
97 params = attr.ib()
98 contentopts = attr.ib()
98 contentopts = attr.ib()
99
99
100 def parsebundlespec(repo, spec, strict=True):
100 def parsebundlespec(repo, spec, strict=True):
101 """Parse a bundle string specification into parts.
101 """Parse a bundle string specification into parts.
102
102
103 Bundle specifications denote a well-defined bundle/exchange format.
103 Bundle specifications denote a well-defined bundle/exchange format.
104 The content of a given specification should not change over time in
104 The content of a given specification should not change over time in
105 order to ensure that bundles produced by a newer version of Mercurial are
105 order to ensure that bundles produced by a newer version of Mercurial are
106 readable from an older version.
106 readable from an older version.
107
107
108 The string currently has the form:
108 The string currently has the form:
109
109
110 <compression>-<type>[;<parameter0>[;<parameter1>]]
110 <compression>-<type>[;<parameter0>[;<parameter1>]]
111
111
112 Where <compression> is one of the supported compression formats
112 Where <compression> is one of the supported compression formats
113 and <type> is (currently) a version string. A ";" can follow the type and
113 and <type> is (currently) a version string. A ";" can follow the type and
114 all text afterwards is interpreted as URI encoded, ";" delimited key=value
114 all text afterwards is interpreted as URI encoded, ";" delimited key=value
115 pairs.
115 pairs.
116
116
117 If ``strict`` is True (the default) <compression> is required. Otherwise,
117 If ``strict`` is True (the default) <compression> is required. Otherwise,
118 it is optional.
118 it is optional.
119
119
120 Returns a bundlespec object of (compression, version, parameters).
120 Returns a bundlespec object of (compression, version, parameters).
121 Compression will be ``None`` if not in strict mode and a compression isn't
121 Compression will be ``None`` if not in strict mode and a compression isn't
122 defined.
122 defined.
123
123
124 An ``InvalidBundleSpecification`` is raised when the specification is
124 An ``InvalidBundleSpecification`` is raised when the specification is
125 not syntactically well formed.
125 not syntactically well formed.
126
126
127 An ``UnsupportedBundleSpecification`` is raised when the compression or
127 An ``UnsupportedBundleSpecification`` is raised when the compression or
128 bundle type/version is not recognized.
128 bundle type/version is not recognized.
129
129
130 Note: this function will likely eventually return a more complex data
130 Note: this function will likely eventually return a more complex data
131 structure, including bundle2 part information.
131 structure, including bundle2 part information.
132 """
132 """
133 def parseparams(s):
133 def parseparams(s):
134 if ';' not in s:
134 if ';' not in s:
135 return s, {}
135 return s, {}
136
136
137 params = {}
137 params = {}
138 version, paramstr = s.split(';', 1)
138 version, paramstr = s.split(';', 1)
139
139
140 for p in paramstr.split(';'):
140 for p in paramstr.split(';'):
141 if '=' not in p:
141 if '=' not in p:
142 raise error.InvalidBundleSpecification(
142 raise error.InvalidBundleSpecification(
143 _('invalid bundle specification: '
143 _('invalid bundle specification: '
144 'missing "=" in parameter: %s') % p)
144 'missing "=" in parameter: %s') % p)
145
145
146 key, value = p.split('=', 1)
146 key, value = p.split('=', 1)
147 key = urlreq.unquote(key)
147 key = urlreq.unquote(key)
148 value = urlreq.unquote(value)
148 value = urlreq.unquote(value)
149 params[key] = value
149 params[key] = value
150
150
151 return version, params
151 return version, params
152
152
153
153
154 if strict and '-' not in spec:
154 if strict and '-' not in spec:
155 raise error.InvalidBundleSpecification(
155 raise error.InvalidBundleSpecification(
156 _('invalid bundle specification; '
156 _('invalid bundle specification; '
157 'must be prefixed with compression: %s') % spec)
157 'must be prefixed with compression: %s') % spec)
158
158
159 if '-' in spec:
159 if '-' in spec:
160 compression, version = spec.split('-', 1)
160 compression, version = spec.split('-', 1)
161
161
162 if compression not in util.compengines.supportedbundlenames:
162 if compression not in util.compengines.supportedbundlenames:
163 raise error.UnsupportedBundleSpecification(
163 raise error.UnsupportedBundleSpecification(
164 _('%s compression is not supported') % compression)
164 _('%s compression is not supported') % compression)
165
165
166 version, params = parseparams(version)
166 version, params = parseparams(version)
167
167
168 if version not in _bundlespeccgversions:
168 if version not in _bundlespeccgversions:
169 raise error.UnsupportedBundleSpecification(
169 raise error.UnsupportedBundleSpecification(
170 _('%s is not a recognized bundle version') % version)
170 _('%s is not a recognized bundle version') % version)
171 else:
171 else:
172 # Value could be just the compression or just the version, in which
172 # Value could be just the compression or just the version, in which
173 # case some defaults are assumed (but only when not in strict mode).
173 # case some defaults are assumed (but only when not in strict mode).
174 assert not strict
174 assert not strict
175
175
176 spec, params = parseparams(spec)
176 spec, params = parseparams(spec)
177
177
178 if spec in util.compengines.supportedbundlenames:
178 if spec in util.compengines.supportedbundlenames:
179 compression = spec
179 compression = spec
180 version = 'v1'
180 version = 'v1'
181 # Generaldelta repos require v2.
181 # Generaldelta repos require v2.
182 if 'generaldelta' in repo.requirements:
182 if 'generaldelta' in repo.requirements:
183 version = 'v2'
183 version = 'v2'
184 # Modern compression engines require v2.
184 # Modern compression engines require v2.
185 if compression not in _bundlespecv1compengines:
185 if compression not in _bundlespecv1compengines:
186 version = 'v2'
186 version = 'v2'
187 elif spec in _bundlespeccgversions:
187 elif spec in _bundlespeccgversions:
188 if spec == 'packed1':
188 if spec == 'packed1':
189 compression = 'none'
189 compression = 'none'
190 else:
190 else:
191 compression = 'bzip2'
191 compression = 'bzip2'
192 version = spec
192 version = spec
193 else:
193 else:
194 raise error.UnsupportedBundleSpecification(
194 raise error.UnsupportedBundleSpecification(
195 _('%s is not a recognized bundle specification') % spec)
195 _('%s is not a recognized bundle specification') % spec)
196
196
197 # Bundle version 1 only supports a known set of compression engines.
197 # Bundle version 1 only supports a known set of compression engines.
198 if version == 'v1' and compression not in _bundlespecv1compengines:
198 if version == 'v1' and compression not in _bundlespecv1compengines:
199 raise error.UnsupportedBundleSpecification(
199 raise error.UnsupportedBundleSpecification(
200 _('compression engine %s is not supported on v1 bundles') %
200 _('compression engine %s is not supported on v1 bundles') %
201 compression)
201 compression)
202
202
203 # The specification for packed1 can optionally declare the data formats
203 # The specification for packed1 can optionally declare the data formats
204 # required to apply it. If we see this metadata, compare against what the
204 # required to apply it. If we see this metadata, compare against what the
205 # repo supports and error if the bundle isn't compatible.
205 # repo supports and error if the bundle isn't compatible.
206 if version == 'packed1' and 'requirements' in params:
206 if version == 'packed1' and 'requirements' in params:
207 requirements = set(params['requirements'].split(','))
207 requirements = set(params['requirements'].split(','))
208 missingreqs = requirements - repo.supportedformats
208 missingreqs = requirements - repo.supportedformats
209 if missingreqs:
209 if missingreqs:
210 raise error.UnsupportedBundleSpecification(
210 raise error.UnsupportedBundleSpecification(
211 _('missing support for repository features: %s') %
211 _('missing support for repository features: %s') %
212 ', '.join(sorted(missingreqs)))
212 ', '.join(sorted(missingreqs)))
213
213
214 # Compute contentopts based on the version
214 # Compute contentopts based on the version
215 contentopts = _bundlespeccontentopts.get(version, {}).copy()
215 contentopts = _bundlespeccontentopts.get(version, {}).copy()
216
216
217 # Process the variants
217 # Process the variants
218 if "stream" in params and params["stream"] == "v2":
218 if "stream" in params and params["stream"] == "v2":
219 variant = _bundlespecvariants["streamv2"]
219 variant = _bundlespecvariants["streamv2"]
220 contentopts.update(variant)
220 contentopts.update(variant)
221
221
222 engine = util.compengines.forbundlename(compression)
222 engine = util.compengines.forbundlename(compression)
223 compression, wirecompression = engine.bundletype()
223 compression, wirecompression = engine.bundletype()
224 wireversion = _bundlespeccgversions[version]
224 wireversion = _bundlespeccgversions[version]
225
225
226 return bundlespec(compression, wirecompression, version, wireversion,
226 return bundlespec(compression, wirecompression, version, wireversion,
227 params, contentopts)
227 params, contentopts)
228
228
229 def readbundle(ui, fh, fname, vfs=None):
229 def readbundle(ui, fh, fname, vfs=None):
230 header = changegroup.readexactly(fh, 4)
230 header = changegroup.readexactly(fh, 4)
231
231
232 alg = None
232 alg = None
233 if not fname:
233 if not fname:
234 fname = "stream"
234 fname = "stream"
235 if not header.startswith('HG') and header.startswith('\0'):
235 if not header.startswith('HG') and header.startswith('\0'):
236 fh = changegroup.headerlessfixup(fh, header)
236 fh = changegroup.headerlessfixup(fh, header)
237 header = "HG10"
237 header = "HG10"
238 alg = 'UN'
238 alg = 'UN'
239 elif vfs:
239 elif vfs:
240 fname = vfs.join(fname)
240 fname = vfs.join(fname)
241
241
242 magic, version = header[0:2], header[2:4]
242 magic, version = header[0:2], header[2:4]
243
243
244 if magic != 'HG':
244 if magic != 'HG':
245 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
245 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
246 if version == '10':
246 if version == '10':
247 if alg is None:
247 if alg is None:
248 alg = changegroup.readexactly(fh, 2)
248 alg = changegroup.readexactly(fh, 2)
249 return changegroup.cg1unpacker(fh, alg)
249 return changegroup.cg1unpacker(fh, alg)
250 elif version.startswith('2'):
250 elif version.startswith('2'):
251 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
251 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
252 elif version == 'S1':
252 elif version == 'S1':
253 return streamclone.streamcloneapplier(fh)
253 return streamclone.streamcloneapplier(fh)
254 else:
254 else:
255 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
255 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
256
256
257 def getbundlespec(ui, fh):
257 def getbundlespec(ui, fh):
258 """Infer the bundlespec from a bundle file handle.
258 """Infer the bundlespec from a bundle file handle.
259
259
260 The input file handle is seeked and the original seek position is not
260 The input file handle is seeked and the original seek position is not
261 restored.
261 restored.
262 """
262 """
263 def speccompression(alg):
263 def speccompression(alg):
264 try:
264 try:
265 return util.compengines.forbundletype(alg).bundletype()[0]
265 return util.compengines.forbundletype(alg).bundletype()[0]
266 except KeyError:
266 except KeyError:
267 return None
267 return None
268
268
269 b = readbundle(ui, fh, None)
269 b = readbundle(ui, fh, None)
270 if isinstance(b, changegroup.cg1unpacker):
270 if isinstance(b, changegroup.cg1unpacker):
271 alg = b._type
271 alg = b._type
272 if alg == '_truncatedBZ':
272 if alg == '_truncatedBZ':
273 alg = 'BZ'
273 alg = 'BZ'
274 comp = speccompression(alg)
274 comp = speccompression(alg)
275 if not comp:
275 if not comp:
276 raise error.Abort(_('unknown compression algorithm: %s') % alg)
276 raise error.Abort(_('unknown compression algorithm: %s') % alg)
277 return '%s-v1' % comp
277 return '%s-v1' % comp
278 elif isinstance(b, bundle2.unbundle20):
278 elif isinstance(b, bundle2.unbundle20):
279 if 'Compression' in b.params:
279 if 'Compression' in b.params:
280 comp = speccompression(b.params['Compression'])
280 comp = speccompression(b.params['Compression'])
281 if not comp:
281 if not comp:
282 raise error.Abort(_('unknown compression algorithm: %s') % comp)
282 raise error.Abort(_('unknown compression algorithm: %s') % comp)
283 else:
283 else:
284 comp = 'none'
284 comp = 'none'
285
285
286 version = None
286 version = None
287 for part in b.iterparts():
287 for part in b.iterparts():
288 if part.type == 'changegroup':
288 if part.type == 'changegroup':
289 version = part.params['version']
289 version = part.params['version']
290 if version in ('01', '02'):
290 if version in ('01', '02'):
291 version = 'v2'
291 version = 'v2'
292 else:
292 else:
293 raise error.Abort(_('changegroup version %s does not have '
293 raise error.Abort(_('changegroup version %s does not have '
294 'a known bundlespec') % version,
294 'a known bundlespec') % version,
295 hint=_('try upgrading your Mercurial '
295 hint=_('try upgrading your Mercurial '
296 'client'))
296 'client'))
297 elif part.type == 'stream2' and version is None:
297 elif part.type == 'stream2' and version is None:
298 # A stream2 part requires to be part of a v2 bundle
298 # A stream2 part requires to be part of a v2 bundle
299 version = "v2"
299 version = "v2"
300 requirements = urlreq.unquote(part.params['requirements'])
300 requirements = urlreq.unquote(part.params['requirements'])
301 splitted = requirements.split()
301 splitted = requirements.split()
302 params = bundle2._formatrequirementsparams(splitted)
302 params = bundle2._formatrequirementsparams(splitted)
303 return 'none-v2;stream=v2;%s' % params
303 return 'none-v2;stream=v2;%s' % params
304
304
305 if not version:
305 if not version:
306 raise error.Abort(_('could not identify changegroup version in '
306 raise error.Abort(_('could not identify changegroup version in '
307 'bundle'))
307 'bundle'))
308
308
309 return '%s-%s' % (comp, version)
309 return '%s-%s' % (comp, version)
310 elif isinstance(b, streamclone.streamcloneapplier):
310 elif isinstance(b, streamclone.streamcloneapplier):
311 requirements = streamclone.readbundle1header(fh)[2]
311 requirements = streamclone.readbundle1header(fh)[2]
312 formatted = bundle2._formatrequirementsparams(requirements)
312 formatted = bundle2._formatrequirementsparams(requirements)
313 return 'none-packed1;%s' % formatted
313 return 'none-packed1;%s' % formatted
314 else:
314 else:
315 raise error.Abort(_('unknown bundle type: %s') % b)
315 raise error.Abort(_('unknown bundle type: %s') % b)
316
316
317 def _computeoutgoing(repo, heads, common):
317 def _computeoutgoing(repo, heads, common):
318 """Computes which revs are outgoing given a set of common
318 """Computes which revs are outgoing given a set of common
319 and a set of heads.
319 and a set of heads.
320
320
321 This is a separate function so extensions can have access to
321 This is a separate function so extensions can have access to
322 the logic.
322 the logic.
323
323
324 Returns a discovery.outgoing object.
324 Returns a discovery.outgoing object.
325 """
325 """
326 cl = repo.changelog
326 cl = repo.changelog
327 if common:
327 if common:
328 hasnode = cl.hasnode
328 hasnode = cl.hasnode
329 common = [n for n in common if hasnode(n)]
329 common = [n for n in common if hasnode(n)]
330 else:
330 else:
331 common = [nullid]
331 common = [nullid]
332 if not heads:
332 if not heads:
333 heads = cl.heads()
333 heads = cl.heads()
334 return discovery.outgoing(repo, common, heads)
334 return discovery.outgoing(repo, common, heads)
335
335
336 def _forcebundle1(op):
336 def _forcebundle1(op):
337 """return true if a pull/push must use bundle1
337 """return true if a pull/push must use bundle1
338
338
339 This function is used to allow testing of the older bundle version"""
339 This function is used to allow testing of the older bundle version"""
340 ui = op.repo.ui
340 ui = op.repo.ui
341 # The goal is this config is to allow developer to choose the bundle
341 # The goal is this config is to allow developer to choose the bundle
342 # version used during exchanged. This is especially handy during test.
342 # version used during exchanged. This is especially handy during test.
343 # Value is a list of bundle version to be picked from, highest version
343 # Value is a list of bundle version to be picked from, highest version
344 # should be used.
344 # should be used.
345 #
345 #
346 # developer config: devel.legacy.exchange
346 # developer config: devel.legacy.exchange
347 exchange = ui.configlist('devel', 'legacy.exchange')
347 exchange = ui.configlist('devel', 'legacy.exchange')
348 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
348 forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
349 return forcebundle1 or not op.remote.capable('bundle2')
349 return forcebundle1 or not op.remote.capable('bundle2')
350
350
351 class pushoperation(object):
351 class pushoperation(object):
352 """A object that represent a single push operation
352 """A object that represent a single push operation
353
353
354 Its purpose is to carry push related state and very common operations.
354 Its purpose is to carry push related state and very common operations.
355
355
356 A new pushoperation should be created at the beginning of each push and
356 A new pushoperation should be created at the beginning of each push and
357 discarded afterward.
357 discarded afterward.
358 """
358 """
359
359
360 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
360 def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
361 bookmarks=(), pushvars=None):
361 bookmarks=(), pushvars=None):
362 # repo we push from
362 # repo we push from
363 self.repo = repo
363 self.repo = repo
364 self.ui = repo.ui
364 self.ui = repo.ui
365 # repo we push to
365 # repo we push to
366 self.remote = remote
366 self.remote = remote
367 # force option provided
367 # force option provided
368 self.force = force
368 self.force = force
369 # revs to be pushed (None is "all")
369 # revs to be pushed (None is "all")
370 self.revs = revs
370 self.revs = revs
371 # bookmark explicitly pushed
371 # bookmark explicitly pushed
372 self.bookmarks = bookmarks
372 self.bookmarks = bookmarks
373 # allow push of new branch
373 # allow push of new branch
374 self.newbranch = newbranch
374 self.newbranch = newbranch
375 # step already performed
375 # step already performed
376 # (used to check what steps have been already performed through bundle2)
376 # (used to check what steps have been already performed through bundle2)
377 self.stepsdone = set()
377 self.stepsdone = set()
378 # Integer version of the changegroup push result
378 # Integer version of the changegroup push result
379 # - None means nothing to push
379 # - None means nothing to push
380 # - 0 means HTTP error
380 # - 0 means HTTP error
381 # - 1 means we pushed and remote head count is unchanged *or*
381 # - 1 means we pushed and remote head count is unchanged *or*
382 # we have outgoing changesets but refused to push
382 # we have outgoing changesets but refused to push
383 # - other values as described by addchangegroup()
383 # - other values as described by addchangegroup()
384 self.cgresult = None
384 self.cgresult = None
385 # Boolean value for the bookmark push
385 # Boolean value for the bookmark push
386 self.bkresult = None
386 self.bkresult = None
387 # discover.outgoing object (contains common and outgoing data)
387 # discover.outgoing object (contains common and outgoing data)
388 self.outgoing = None
388 self.outgoing = None
389 # all remote topological heads before the push
389 # all remote topological heads before the push
390 self.remoteheads = None
390 self.remoteheads = None
391 # Details of the remote branch pre and post push
391 # Details of the remote branch pre and post push
392 #
392 #
393 # mapping: {'branch': ([remoteheads],
393 # mapping: {'branch': ([remoteheads],
394 # [newheads],
394 # [newheads],
395 # [unsyncedheads],
395 # [unsyncedheads],
396 # [discardedheads])}
396 # [discardedheads])}
397 # - branch: the branch name
397 # - branch: the branch name
398 # - remoteheads: the list of remote heads known locally
398 # - remoteheads: the list of remote heads known locally
399 # None if the branch is new
399 # None if the branch is new
400 # - newheads: the new remote heads (known locally) with outgoing pushed
400 # - newheads: the new remote heads (known locally) with outgoing pushed
401 # - unsyncedheads: the list of remote heads unknown locally.
401 # - unsyncedheads: the list of remote heads unknown locally.
402 # - discardedheads: the list of remote heads made obsolete by the push
402 # - discardedheads: the list of remote heads made obsolete by the push
403 self.pushbranchmap = None
403 self.pushbranchmap = None
404 # testable as a boolean indicating if any nodes are missing locally.
404 # testable as a boolean indicating if any nodes are missing locally.
405 self.incoming = None
405 self.incoming = None
406 # summary of the remote phase situation
406 # summary of the remote phase situation
407 self.remotephases = None
407 self.remotephases = None
408 # phases changes that must be pushed along side the changesets
408 # phases changes that must be pushed along side the changesets
409 self.outdatedphases = None
409 self.outdatedphases = None
410 # phases changes that must be pushed if changeset push fails
410 # phases changes that must be pushed if changeset push fails
411 self.fallbackoutdatedphases = None
411 self.fallbackoutdatedphases = None
412 # outgoing obsmarkers
412 # outgoing obsmarkers
413 self.outobsmarkers = set()
413 self.outobsmarkers = set()
414 # outgoing bookmarks
414 # outgoing bookmarks
415 self.outbookmarks = []
415 self.outbookmarks = []
416 # transaction manager
416 # transaction manager
417 self.trmanager = None
417 self.trmanager = None
418 # map { pushkey partid -> callback handling failure}
418 # map { pushkey partid -> callback handling failure}
419 # used to handle exception from mandatory pushkey part failure
419 # used to handle exception from mandatory pushkey part failure
420 self.pkfailcb = {}
420 self.pkfailcb = {}
421 # an iterable of pushvars or None
421 # an iterable of pushvars or None
422 self.pushvars = pushvars
422 self.pushvars = pushvars
423
423
424 @util.propertycache
424 @util.propertycache
425 def futureheads(self):
425 def futureheads(self):
426 """future remote heads if the changeset push succeeds"""
426 """future remote heads if the changeset push succeeds"""
427 return self.outgoing.missingheads
427 return self.outgoing.missingheads
428
428
429 @util.propertycache
429 @util.propertycache
430 def fallbackheads(self):
430 def fallbackheads(self):
431 """future remote heads if the changeset push fails"""
431 """future remote heads if the changeset push fails"""
432 if self.revs is None:
432 if self.revs is None:
433 # not target to push, all common are relevant
433 # not target to push, all common are relevant
434 return self.outgoing.commonheads
434 return self.outgoing.commonheads
435 unfi = self.repo.unfiltered()
435 unfi = self.repo.unfiltered()
436 # I want cheads = heads(::missingheads and ::commonheads)
436 # I want cheads = heads(::missingheads and ::commonheads)
437 # (missingheads is revs with secret changeset filtered out)
437 # (missingheads is revs with secret changeset filtered out)
438 #
438 #
439 # This can be expressed as:
439 # This can be expressed as:
440 # cheads = ( (missingheads and ::commonheads)
440 # cheads = ( (missingheads and ::commonheads)
441 # + (commonheads and ::missingheads))"
441 # + (commonheads and ::missingheads))"
442 # )
442 # )
443 #
443 #
444 # while trying to push we already computed the following:
444 # while trying to push we already computed the following:
445 # common = (::commonheads)
445 # common = (::commonheads)
446 # missing = ((commonheads::missingheads) - commonheads)
446 # missing = ((commonheads::missingheads) - commonheads)
447 #
447 #
448 # We can pick:
448 # We can pick:
449 # * missingheads part of common (::commonheads)
449 # * missingheads part of common (::commonheads)
450 common = self.outgoing.common
450 common = self.outgoing.common
451 nm = self.repo.changelog.nodemap
451 nm = self.repo.changelog.nodemap
452 cheads = [node for node in self.revs if nm[node] in common]
452 cheads = [node for node in self.revs if nm[node] in common]
453 # and
453 # and
454 # * commonheads parents on missing
454 # * commonheads parents on missing
455 revset = unfi.set('%ln and parents(roots(%ln))',
455 revset = unfi.set('%ln and parents(roots(%ln))',
456 self.outgoing.commonheads,
456 self.outgoing.commonheads,
457 self.outgoing.missing)
457 self.outgoing.missing)
458 cheads.extend(c.node() for c in revset)
458 cheads.extend(c.node() for c in revset)
459 return cheads
459 return cheads
460
460
461 @property
461 @property
462 def commonheads(self):
462 def commonheads(self):
463 """set of all common heads after changeset bundle push"""
463 """set of all common heads after changeset bundle push"""
464 if self.cgresult:
464 if self.cgresult:
465 return self.futureheads
465 return self.futureheads
466 else:
466 else:
467 return self.fallbackheads
467 return self.fallbackheads
468
468
469 # mapping of message used when pushing bookmark
469 # mapping of message used when pushing bookmark
470 bookmsgmap = {'update': (_("updating bookmark %s\n"),
470 bookmsgmap = {'update': (_("updating bookmark %s\n"),
471 _('updating bookmark %s failed!\n')),
471 _('updating bookmark %s failed!\n')),
472 'export': (_("exporting bookmark %s\n"),
472 'export': (_("exporting bookmark %s\n"),
473 _('exporting bookmark %s failed!\n')),
473 _('exporting bookmark %s failed!\n')),
474 'delete': (_("deleting remote bookmark %s\n"),
474 'delete': (_("deleting remote bookmark %s\n"),
475 _('deleting remote bookmark %s failed!\n')),
475 _('deleting remote bookmark %s failed!\n')),
476 }
476 }
477
477
478
478
479 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
479 def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
480 opargs=None):
480 opargs=None):
481 '''Push outgoing changesets (limited by revs) from a local
481 '''Push outgoing changesets (limited by revs) from a local
482 repository to remote. Return an integer:
482 repository to remote. Return an integer:
483 - None means nothing to push
483 - None means nothing to push
484 - 0 means HTTP error
484 - 0 means HTTP error
485 - 1 means we pushed and remote head count is unchanged *or*
485 - 1 means we pushed and remote head count is unchanged *or*
486 we have outgoing changesets but refused to push
486 we have outgoing changesets but refused to push
487 - other values as described by addchangegroup()
487 - other values as described by addchangegroup()
488 '''
488 '''
489 if opargs is None:
489 if opargs is None:
490 opargs = {}
490 opargs = {}
491 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
491 pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
492 **pycompat.strkwargs(opargs))
492 **pycompat.strkwargs(opargs))
493 if pushop.remote.local():
493 if pushop.remote.local():
494 missing = (set(pushop.repo.requirements)
494 missing = (set(pushop.repo.requirements)
495 - pushop.remote.local().supported)
495 - pushop.remote.local().supported)
496 if missing:
496 if missing:
497 msg = _("required features are not"
497 msg = _("required features are not"
498 " supported in the destination:"
498 " supported in the destination:"
499 " %s") % (', '.join(sorted(missing)))
499 " %s") % (', '.join(sorted(missing)))
500 raise error.Abort(msg)
500 raise error.Abort(msg)
501
501
502 if not pushop.remote.canpush():
502 if not pushop.remote.canpush():
503 raise error.Abort(_("destination does not support push"))
503 raise error.Abort(_("destination does not support push"))
504
504
505 if not pushop.remote.capable('unbundle'):
505 if not pushop.remote.capable('unbundle'):
506 raise error.Abort(_('cannot push: destination does not support the '
506 raise error.Abort(_('cannot push: destination does not support the '
507 'unbundle wire protocol command'))
507 'unbundle wire protocol command'))
508
508
509 # get lock as we might write phase data
509 # get lock as we might write phase data
510 wlock = lock = None
510 wlock = lock = None
511 try:
511 try:
512 # bundle2 push may receive a reply bundle touching bookmarks or other
512 # bundle2 push may receive a reply bundle touching bookmarks or other
513 # things requiring the wlock. Take it now to ensure proper ordering.
513 # things requiring the wlock. Take it now to ensure proper ordering.
514 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
514 maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
515 if (not _forcebundle1(pushop)) and maypushback:
515 if (not _forcebundle1(pushop)) and maypushback:
516 wlock = pushop.repo.wlock()
516 wlock = pushop.repo.wlock()
517 lock = pushop.repo.lock()
517 lock = pushop.repo.lock()
518 pushop.trmanager = transactionmanager(pushop.repo,
518 pushop.trmanager = transactionmanager(pushop.repo,
519 'push-response',
519 'push-response',
520 pushop.remote.url())
520 pushop.remote.url())
521 except error.LockUnavailable as err:
521 except error.LockUnavailable as err:
522 # source repo cannot be locked.
522 # source repo cannot be locked.
523 # We do not abort the push, but just disable the local phase
523 # We do not abort the push, but just disable the local phase
524 # synchronisation.
524 # synchronisation.
525 msg = 'cannot lock source repository: %s\n' % err
525 msg = 'cannot lock source repository: %s\n' % err
526 pushop.ui.debug(msg)
526 pushop.ui.debug(msg)
527
527
528 with wlock or util.nullcontextmanager(), \
528 with wlock or util.nullcontextmanager(), \
529 lock or util.nullcontextmanager(), \
529 lock or util.nullcontextmanager(), \
530 pushop.trmanager or util.nullcontextmanager():
530 pushop.trmanager or util.nullcontextmanager():
531 pushop.repo.checkpush(pushop)
531 pushop.repo.checkpush(pushop)
532 _pushdiscovery(pushop)
532 _pushdiscovery(pushop)
533 if not _forcebundle1(pushop):
533 if not _forcebundle1(pushop):
534 _pushbundle2(pushop)
534 _pushbundle2(pushop)
535 _pushchangeset(pushop)
535 _pushchangeset(pushop)
536 _pushsyncphase(pushop)
536 _pushsyncphase(pushop)
537 _pushobsolete(pushop)
537 _pushobsolete(pushop)
538 _pushbookmark(pushop)
538 _pushbookmark(pushop)
539
539
540 if repo.ui.configbool('experimental', 'remotenames'):
540 if repo.ui.configbool('experimental', 'remotenames'):
541 logexchange.pullremotenames(repo, remote)
541 logexchange.pullremotenames(repo, remote)
542
542
543 return pushop
543 return pushop
544
544
545 # list of steps to perform discovery before push
545 # list of steps to perform discovery before push
546 pushdiscoveryorder = []
546 pushdiscoveryorder = []
547
547
548 # Mapping between step name and function
548 # Mapping between step name and function
549 #
549 #
550 # This exists to help extensions wrap steps if necessary
550 # This exists to help extensions wrap steps if necessary
551 pushdiscoverymapping = {}
551 pushdiscoverymapping = {}
552
552
553 def pushdiscovery(stepname):
553 def pushdiscovery(stepname):
554 """decorator for function performing discovery before push
554 """decorator for function performing discovery before push
555
555
556 The function is added to the step -> function mapping and appended to the
556 The function is added to the step -> function mapping and appended to the
557 list of steps. Beware that decorated function will be added in order (this
557 list of steps. Beware that decorated function will be added in order (this
558 may matter).
558 may matter).
559
559
560 You can only use this decorator for a new step, if you want to wrap a step
560 You can only use this decorator for a new step, if you want to wrap a step
561 from an extension, change the pushdiscovery dictionary directly."""
561 from an extension, change the pushdiscovery dictionary directly."""
562 def dec(func):
562 def dec(func):
563 assert stepname not in pushdiscoverymapping
563 assert stepname not in pushdiscoverymapping
564 pushdiscoverymapping[stepname] = func
564 pushdiscoverymapping[stepname] = func
565 pushdiscoveryorder.append(stepname)
565 pushdiscoveryorder.append(stepname)
566 return func
566 return func
567 return dec
567 return dec
568
568
569 def _pushdiscovery(pushop):
569 def _pushdiscovery(pushop):
570 """Run all discovery steps"""
570 """Run all discovery steps"""
571 for stepname in pushdiscoveryorder:
571 for stepname in pushdiscoveryorder:
572 step = pushdiscoverymapping[stepname]
572 step = pushdiscoverymapping[stepname]
573 step(pushop)
573 step(pushop)
574
574
575 @pushdiscovery('changeset')
575 @pushdiscovery('changeset')
576 def _pushdiscoverychangeset(pushop):
576 def _pushdiscoverychangeset(pushop):
577 """discover the changeset that need to be pushed"""
577 """discover the changeset that need to be pushed"""
578 fci = discovery.findcommonincoming
578 fci = discovery.findcommonincoming
579 if pushop.revs:
579 if pushop.revs:
580 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
580 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
581 ancestorsof=pushop.revs)
581 ancestorsof=pushop.revs)
582 else:
582 else:
583 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
583 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
584 common, inc, remoteheads = commoninc
584 common, inc, remoteheads = commoninc
585 fco = discovery.findcommonoutgoing
585 fco = discovery.findcommonoutgoing
586 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
586 outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
587 commoninc=commoninc, force=pushop.force)
587 commoninc=commoninc, force=pushop.force)
588 pushop.outgoing = outgoing
588 pushop.outgoing = outgoing
589 pushop.remoteheads = remoteheads
589 pushop.remoteheads = remoteheads
590 pushop.incoming = inc
590 pushop.incoming = inc
591
591
592 @pushdiscovery('phase')
592 @pushdiscovery('phase')
593 def _pushdiscoveryphase(pushop):
593 def _pushdiscoveryphase(pushop):
594 """discover the phase that needs to be pushed
594 """discover the phase that needs to be pushed
595
595
596 (computed for both success and failure case for changesets push)"""
596 (computed for both success and failure case for changesets push)"""
597 outgoing = pushop.outgoing
597 outgoing = pushop.outgoing
598 unfi = pushop.repo.unfiltered()
598 unfi = pushop.repo.unfiltered()
599 remotephases = listkeys(pushop.remote, 'phases')
599 remotephases = listkeys(pushop.remote, 'phases')
600
600
601 if (pushop.ui.configbool('ui', '_usedassubrepo')
601 if (pushop.ui.configbool('ui', '_usedassubrepo')
602 and remotephases # server supports phases
602 and remotephases # server supports phases
603 and not pushop.outgoing.missing # no changesets to be pushed
603 and not pushop.outgoing.missing # no changesets to be pushed
604 and remotephases.get('publishing', False)):
604 and remotephases.get('publishing', False)):
605 # When:
605 # When:
606 # - this is a subrepo push
606 # - this is a subrepo push
607 # - and remote support phase
607 # - and remote support phase
608 # - and no changeset are to be pushed
608 # - and no changeset are to be pushed
609 # - and remote is publishing
609 # - and remote is publishing
610 # We may be in issue 3781 case!
610 # We may be in issue 3781 case!
611 # We drop the possible phase synchronisation done by
611 # We drop the possible phase synchronisation done by
612 # courtesy to publish changesets possibly locally draft
612 # courtesy to publish changesets possibly locally draft
613 # on the remote.
613 # on the remote.
614 pushop.outdatedphases = []
614 pushop.outdatedphases = []
615 pushop.fallbackoutdatedphases = []
615 pushop.fallbackoutdatedphases = []
616 return
616 return
617
617
618 pushop.remotephases = phases.remotephasessummary(pushop.repo,
618 pushop.remotephases = phases.remotephasessummary(pushop.repo,
619 pushop.fallbackheads,
619 pushop.fallbackheads,
620 remotephases)
620 remotephases)
621 droots = pushop.remotephases.draftroots
621 droots = pushop.remotephases.draftroots
622
622
623 extracond = ''
623 extracond = ''
624 if not pushop.remotephases.publishing:
624 if not pushop.remotephases.publishing:
625 extracond = ' and public()'
625 extracond = ' and public()'
626 revset = 'heads((%%ln::%%ln) %s)' % extracond
626 revset = 'heads((%%ln::%%ln) %s)' % extracond
627 # Get the list of all revs draft on remote by public here.
627 # Get the list of all revs draft on remote by public here.
628 # XXX Beware that revset break if droots is not strictly
628 # XXX Beware that revset break if droots is not strictly
629 # XXX root we may want to ensure it is but it is costly
629 # XXX root we may want to ensure it is but it is costly
630 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
630 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
631 if not outgoing.missing:
631 if not outgoing.missing:
632 future = fallback
632 future = fallback
633 else:
633 else:
634 # adds changeset we are going to push as draft
634 # adds changeset we are going to push as draft
635 #
635 #
636 # should not be necessary for publishing server, but because of an
636 # should not be necessary for publishing server, but because of an
637 # issue fixed in xxxxx we have to do it anyway.
637 # issue fixed in xxxxx we have to do it anyway.
638 fdroots = list(unfi.set('roots(%ln + %ln::)',
638 fdroots = list(unfi.set('roots(%ln + %ln::)',
639 outgoing.missing, droots))
639 outgoing.missing, droots))
640 fdroots = [f.node() for f in fdroots]
640 fdroots = [f.node() for f in fdroots]
641 future = list(unfi.set(revset, fdroots, pushop.futureheads))
641 future = list(unfi.set(revset, fdroots, pushop.futureheads))
642 pushop.outdatedphases = future
642 pushop.outdatedphases = future
643 pushop.fallbackoutdatedphases = fallback
643 pushop.fallbackoutdatedphases = fallback
644
644
645 @pushdiscovery('obsmarker')
645 @pushdiscovery('obsmarker')
646 def _pushdiscoveryobsmarkers(pushop):
646 def _pushdiscoveryobsmarkers(pushop):
647 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
647 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
648 return
648 return
649
649
650 if not pushop.repo.obsstore:
650 if not pushop.repo.obsstore:
651 return
651 return
652
652
653 if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
653 if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
654 return
654 return
655
655
656 repo = pushop.repo
656 repo = pushop.repo
657 # very naive computation, that can be quite expensive on big repo.
657 # very naive computation, that can be quite expensive on big repo.
658 # However: evolution is currently slow on them anyway.
658 # However: evolution is currently slow on them anyway.
659 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
659 nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
660 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
660 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
661
661
@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    """Compare local and remote bookmarks and record outgoing moves.

    Delegates the final decision to ``_processcompared`` so extensions
    can alter the behavior.
    """
    ui = pushop.ui
    unfi = pushop.repo.unfiltered()
    ui.debug("checking for updated bookmarks\n")

    # restrict bookmark moves to ancestors of the pushed revs, if any
    ancestors = ()
    if pushop.revs:
        revnums = pycompat.maplist(unfi.changelog.rev, pushop.revs)
        ancestors = unfi.changelog.ancestors(revnums, inclusive=True)

    remotemarks = bookmod.unhexlifybookmarks(
        listkeys(pushop.remote, 'bookmarks'))

    explicit = set(unfi._bookmarks.expandname(mark)
                   for mark in pushop.bookmarks)

    comp = bookmod.comparebookmarks(unfi, unfi._bookmarks, remotemarks)

    def tohex(node):
        # comparison tuples use None for "absent on that side"
        return node if node is None else hex(node)

    hexcomp = [[(b, tohex(scid), tohex(dcid))
                for (b, scid, dcid) in marks]
               for marks in comp]
    return _processcompared(pushop, ancestors, explicit, remotemarks, hexcomp)
692
692
def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """decide which bookmark moves to send from the comparison result

    Exists as a separate function so extensions can alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo
    out = pushop.outbookmarks

    # fast-forwards on the remote: send when covered by the pushed revs
    for b, scid, dcid in advsrc:
        explicit.discard(b)
        if not pushed or repo[scid].rev() in pushed:
            out.append((b, dcid, scid))
    # bookmarks that only exist locally are new on the remote
    for b, scid, dcid in addsrc:
        explicit.discard(b)
        out.append((b, '', scid))
    # overwriting a remote bookmark requires an explicit request
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            out.append((b, dcid, scid))
    # an explicit request for a remote-only bookmark means deletion
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            out.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        explicit.discard(b)

    if explicit:
        leftover = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(_('bookmark %s does not exist on the local '
                         'or remote repository!\n') % leftover[0])
        pushop.bkresult = 2

    out.sort()
736
736
def _pushcheckoutgoing(pushop):
    """Validate outgoing changesets; return True when there is a push.

    Returns False when nothing needs to be pushed.  Aborts when a
    non-forced push contains obsolete or unstable changesets.
    """
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False

    # an empty obsstore (or --force) lets us skip the iteration entirely
    if not pushop.force and unfi.obsstore:
        # messages kept short for the 80 character limit
        instabilitymsg = {
            "orphan": _("push includes orphan changeset: %s!"),
            "phase-divergent": _(
                "push includes phase-divergent changeset: %s!"),
            "content-divergent": _(
                "push includes content-divergent changeset: %s!"),
        }
        # If any missing changeset is obsolete or unstable, then at least
        # one missing head is too, so inspecting heads only is sufficient.
        for node in outgoing.missingheads:
            ctx = unfi[node]
            if ctx.obsolete():
                raise error.Abort(
                    _("push includes obsolete changeset: %s!") % ctx)
            elif ctx.isunstable():
                # TODO print more than one instability in the abort
                # message
                raise error.Abort(
                    instabilitymsg[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True
771
771
# List of names of steps to perform for an outgoing bundle2, order matters.
# Populated via the @b2partsgenerator decorator below.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}
779
779
def b2partsgenerator(stepname, idx=None):
    """decorator registering a bundle2-part generating function

    The decorated function is recorded in the step -> function mapping
    and its step name is appended (or inserted at ``idx``) in the ordered
    step list.  Beware that decorated functions are registered in
    definition order (this may matter).

    Only use this decorator for new steps; if you want to wrap a step
    from an extension, attack the b2partsgenmapping dictionary directly."""
    def register(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return register
798
798
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' skips the push-race check,
    # * with nothing outgoing there is nothing to check.
    if pushop.force or not pushop.outgoing.missingheads:
        return
    canrelated = 'related' in bundler.capabilities.get('checkheads', ())
    if not canrelated or pushop.pushbranchmap is None:
        # fall back to the full-head check
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        return
    # only verify the remote heads this push actually touches
    touched = set()
    for branch, heads in pushop.pushbranchmap.iteritems():
        remoteheads, newheads, unsyncedheads, discardedheads = heads
        if remoteheads is None:
            continue
        known = set(remoteheads)
        touched |= set(discardedheads) & known
        touched |= known - set(newheads)
    if touched:
        bundler.newpart('check:updated-heads', data=iter(sorted(touched)))
822
822
823 def _pushing(pushop):
823 def _pushing(pushop):
824 """return True if we are pushing anything"""
824 """return True if we are pushing anything"""
825 return bool(pushop.outgoing.missing
825 return bool(pushop.outgoing.missing
826 or pushop.outdatedphases
826 or pushop.outdatedphases
827 or pushop.outobsmarkers
827 or pushop.outobsmarkers
828 or pushop.outbookmarks)
828 or pushop.outbookmarks)
829
829
@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if pushop.force or not _pushing(pushop):
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if not pushop.outbookmarks or 'bookmarks' not in b2caps:
        return
    # send the expected current (old) node for every moved bookmark
    pairs = [(book, bin(old)) for book, old, new in pushop.outbookmarks]
    bundler.newpart('check:bookmarks', data=bookmod.binaryencode(pairs))
845
845
@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if pushop.force or not _pushing(pushop):
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if pushop.remotephases is None or 'heads' not in b2caps.get('phases', ()):
        return
    # verify the remote phases have not changed since discovery
    checks = [[] for p in phases.allphases]
    checks[phases.public].extend(pushop.remotephases.publicheads)
    checks[phases.draft].extend(pushop.remotephases.draftroots)
    if any(checks):
        for nodes in checks:
            nodes.sort()
        bundler.newpart('check:phases', data=phases.binaryencode(checks))
863
863
@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    # default to the oldest changegroup format
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        # negotiate the newest version both sides support
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        # the closure captures cgpart.id to find our reply record
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
903
903
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2

    Prefers the binary phase-heads part; falls back to pushkey when the
    remote lacks it or legacy exchange is requested.
    """
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')

    if 'heads' in b2caps.get('phases', ()) and 'phases' not in legacy:
        return _pushb2phaseheads(pushop, bundler)
    if 'pushkey' in b2caps:
        return _pushb2phasespushkey(pushop, bundler)
920
920
def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if not pushop.outdatedphases:
        return
    # heads to be turned public go into the first bucket
    updates = [[] for p in phases.allphases]
    updates[0].extend(h.node() for h in pushop.outdatedphases)
    bundler.newpart('phase-heads', data=phases.binaryencode(updates))
929
929
def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part

    Emits one pushkey part per head being turned public and returns a
    reply handler that reports per-head success or failure.
    """
    pushop.stepsdone.add('phases')
    # (part id, node) pairs, used to map server replies/failures back to
    # the head they concern
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        # register the failure callback under this part's id
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
964
964
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    """insert obsolescence markers into the outgoing bundle"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    # only claim the step when a common markers format exists
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        bundle2.buildobsmarkerspart(bundler, sorted(pushop.outobsmarkers))
976
976
@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2

    Prefers the dedicated bookmarks part; falls back to pushkey when the
    remote lacks it or legacy exchange is requested.
    """
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')

    if 'bookmarks' in b2caps and 'bookmarks' not in legacy:
        return _pushb2bookmarkspart(pushop, bundler)
    if 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)
991
991
992 def _bmaction(old, new):
992 def _bmaction(old, new):
993 """small utility for bookmark pushing"""
993 """small utility for bookmark pushing"""
994 if not old:
994 if not old:
995 return 'export'
995 return 'export'
996 elif not new:
996 elif not new:
997 return 'delete'
997 return 'delete'
998 return 'update'
998 return 'update'
999
999
def _pushb2bookmarkspart(pushop, bundler):
    """push bookmark updates through a dedicated bundle2 part"""
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    actions = []
    records = []
    for book, old, new in pushop.outbookmarks:
        newnode = bin(new)
        records.append((book, newnode))
        actions.append((book, _bmaction(old, newnode)))
    bundler.newpart('bookmarks', data=bookmod.binaryencode(records))

    def handlereply(op):
        # the part is processed as a whole; reaching here means success
        for book, action in actions:
            pushop.ui.status(bookmsgmap[action][0] % book)

    return handlereply
1021
1021
def _pushb2bookmarkspushkey(pushop, bundler):
    """push bookmark updates through bundle2 pushkey parts

    Emits one pushkey part per bookmark and returns a reply handler that
    reports per-bookmark success or failure.
    """
    pushop.stepsdone.add('bookmarks')
    # (part id, bookmark, action) triples, used to map server
    # replies/failures back to the bookmark they concern
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for part we did not generated
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        # classify the move so the right status/error message is used
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply
1066
1066
@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2

    Each entry of ``pushop.pushvars`` must look like ``KEY=VALUE`` (or
    ``KEY=``); they are forwarded to the server as advisory parameters of
    a ``pushvars`` part.  Aborts on a malformed entry.
    '''
    pushvars = pushop.pushvars
    if not pushvars:
        return

    shellvars = {}
    for raw in pushvars:
        if '=' not in raw:
            # wrapped in _() for consistency with the other user-facing
            # abort messages in this module
            msg = _("unable to parse variable '%s', should follow "
                    "'KEY=VALUE' or 'KEY=' format")
            raise error.Abort(msg % raw)
        k, v = raw.split('=', 1)
        shellvars[k] = v

    part = bundler.newpart('pushvars')

    # mandatory=False: the parameters are advisory for the receiver
    for key, value in shellvars.iteritems():
        part.addparam(key, value, mandatory=False)
1085
1085
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    # run every registered step; a step may hand back a callable to be
    # invoked on the server's reply bundle
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push (only the replycaps part is present)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            # the server aborted while processing one of our parts
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        # delegate to the per-part failure callback when one is registered
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
1139
1139
1140 def _pushchangeset(pushop):
1140 def _pushchangeset(pushop):
1141 """Make the actual push of changeset bundle to remote repo"""
1141 """Make the actual push of changeset bundle to remote repo"""
1142 if 'changesets' in pushop.stepsdone:
1142 if 'changesets' in pushop.stepsdone:
1143 return
1143 return
1144 pushop.stepsdone.add('changesets')
1144 pushop.stepsdone.add('changesets')
1145 if not _pushcheckoutgoing(pushop):
1145 if not _pushcheckoutgoing(pushop):
1146 return
1146 return
1147
1147
1148 # Should have verified this in push().
1148 # Should have verified this in push().
1149 assert pushop.remote.capable('unbundle')
1149 assert pushop.remote.capable('unbundle')
1150
1150
1151 pushop.repo.prepushoutgoinghooks(pushop)
1151 pushop.repo.prepushoutgoinghooks(pushop)
1152 outgoing = pushop.outgoing
1152 outgoing = pushop.outgoing
1153 # TODO: get bundlecaps from remote
1153 # TODO: get bundlecaps from remote
1154 bundlecaps = None
1154 bundlecaps = None
1155 # create a changegroup from local
1155 # create a changegroup from local
1156 if pushop.revs is None and not (outgoing.excluded
1156 if pushop.revs is None and not (outgoing.excluded
1157 or pushop.repo.changelog.filteredrevs):
1157 or pushop.repo.changelog.filteredrevs):
1158 # push everything,
1158 # push everything,
1159 # use the fast path, no race possible on push
1159 # use the fast path, no race possible on push
1160 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1160 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
1161 fastpath=True, bundlecaps=bundlecaps)
1161 fastpath=True, bundlecaps=bundlecaps)
1162 else:
1162 else:
1163 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1163 cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
1164 'push', bundlecaps=bundlecaps)
1164 'push', bundlecaps=bundlecaps)
1165
1165
1166 # apply changegroup to remote
1166 # apply changegroup to remote
1167 # local repo finds heads on server, finds out what
1167 # local repo finds heads on server, finds out what
1168 # revs it must push. once revs transferred, if server
1168 # revs it must push. once revs transferred, if server
1169 # finds it has different heads (someone else won
1169 # finds it has different heads (someone else won
1170 # commit/push race), server aborts.
1170 # commit/push race), server aborts.
1171 if pushop.force:
1171 if pushop.force:
1172 remoteheads = ['force']
1172 remoteheads = ['force']
1173 else:
1173 else:
1174 remoteheads = pushop.remoteheads
1174 remoteheads = pushop.remoteheads
1175 # ssh: return remote's addchangegroup()
1175 # ssh: return remote's addchangegroup()
1176 # http: return remote's addchangegroup() or 0 for error
1176 # http: return remote's addchangegroup() or 0 for error
1177 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1177 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
1178 pushop.repo.url())
1178 pushop.repo.url())
1179
1179
1180 def _pushsyncphase(pushop):
1180 def _pushsyncphase(pushop):
1181 """synchronise phase information locally and remotely"""
1181 """synchronise phase information locally and remotely"""
1182 cheads = pushop.commonheads
1182 cheads = pushop.commonheads
1183 # even when we don't push, exchanging phase data is useful
1183 # even when we don't push, exchanging phase data is useful
1184 remotephases = listkeys(pushop.remote, 'phases')
1184 remotephases = listkeys(pushop.remote, 'phases')
1185 if (pushop.ui.configbool('ui', '_usedassubrepo')
1185 if (pushop.ui.configbool('ui', '_usedassubrepo')
1186 and remotephases # server supports phases
1186 and remotephases # server supports phases
1187 and pushop.cgresult is None # nothing was pushed
1187 and pushop.cgresult is None # nothing was pushed
1188 and remotephases.get('publishing', False)):
1188 and remotephases.get('publishing', False)):
1189 # When:
1189 # When:
1190 # - this is a subrepo push
1190 # - this is a subrepo push
1191 # - and remote support phase
1191 # - and remote support phase
1192 # - and no changeset was pushed
1192 # - and no changeset was pushed
1193 # - and remote is publishing
1193 # - and remote is publishing
1194 # We may be in issue 3871 case!
1194 # We may be in issue 3871 case!
1195 # We drop the possible phase synchronisation done by
1195 # We drop the possible phase synchronisation done by
1196 # courtesy to publish changesets possibly locally draft
1196 # courtesy to publish changesets possibly locally draft
1197 # on the remote.
1197 # on the remote.
1198 remotephases = {'publishing': 'True'}
1198 remotephases = {'publishing': 'True'}
1199 if not remotephases: # old server or public only reply from non-publishing
1199 if not remotephases: # old server or public only reply from non-publishing
1200 _localphasemove(pushop, cheads)
1200 _localphasemove(pushop, cheads)
1201 # don't push any phase data as there is nothing to push
1201 # don't push any phase data as there is nothing to push
1202 else:
1202 else:
1203 ana = phases.analyzeremotephases(pushop.repo, cheads,
1203 ana = phases.analyzeremotephases(pushop.repo, cheads,
1204 remotephases)
1204 remotephases)
1205 pheads, droots = ana
1205 pheads, droots = ana
1206 ### Apply remote phase on local
1206 ### Apply remote phase on local
1207 if remotephases.get('publishing', False):
1207 if remotephases.get('publishing', False):
1208 _localphasemove(pushop, cheads)
1208 _localphasemove(pushop, cheads)
1209 else: # publish = False
1209 else: # publish = False
1210 _localphasemove(pushop, pheads)
1210 _localphasemove(pushop, pheads)
1211 _localphasemove(pushop, cheads, phases.draft)
1211 _localphasemove(pushop, cheads, phases.draft)
1212 ### Apply local phase on remote
1212 ### Apply local phase on remote
1213
1213
1214 if pushop.cgresult:
1214 if pushop.cgresult:
1215 if 'phases' in pushop.stepsdone:
1215 if 'phases' in pushop.stepsdone:
1216 # phases already pushed though bundle2
1216 # phases already pushed though bundle2
1217 return
1217 return
1218 outdated = pushop.outdatedphases
1218 outdated = pushop.outdatedphases
1219 else:
1219 else:
1220 outdated = pushop.fallbackoutdatedphases
1220 outdated = pushop.fallbackoutdatedphases
1221
1221
1222 pushop.stepsdone.add('phases')
1222 pushop.stepsdone.add('phases')
1223
1223
1224 # filter heads already turned public by the push
1224 # filter heads already turned public by the push
1225 outdated = [c for c in outdated if c.node() not in pheads]
1225 outdated = [c for c in outdated if c.node() not in pheads]
1226 # fallback to independent pushkey command
1226 # fallback to independent pushkey command
1227 for newremotehead in outdated:
1227 for newremotehead in outdated:
1228 with pushop.remote.commandexecutor() as e:
1228 with pushop.remote.commandexecutor() as e:
1229 r = e.callcommand('pushkey', {
1229 r = e.callcommand('pushkey', {
1230 'namespace': 'phases',
1230 'namespace': 'phases',
1231 'key': newremotehead.hex(),
1231 'key': newremotehead.hex(),
1232 'old': '%d' % phases.draft,
1232 'old': '%d' % phases.draft,
1233 'new': '%d' % phases.public
1233 'new': '%d' % phases.public
1234 }).result()
1234 }).result()
1235
1235
1236 if not r:
1236 if not r:
1237 pushop.ui.warn(_('updating %s to public failed!\n')
1237 pushop.ui.warn(_('updating %s to public failed!\n')
1238 % newremotehead)
1238 % newremotehead)
1239
1239
1240 def _localphasemove(pushop, nodes, phase=phases.public):
1240 def _localphasemove(pushop, nodes, phase=phases.public):
1241 """move <nodes> to <phase> in the local source repo"""
1241 """move <nodes> to <phase> in the local source repo"""
1242 if pushop.trmanager:
1242 if pushop.trmanager:
1243 phases.advanceboundary(pushop.repo,
1243 phases.advanceboundary(pushop.repo,
1244 pushop.trmanager.transaction(),
1244 pushop.trmanager.transaction(),
1245 phase,
1245 phase,
1246 nodes)
1246 nodes)
1247 else:
1247 else:
1248 # repo is not locked, do not change any phases!
1248 # repo is not locked, do not change any phases!
1249 # Informs the user that phases should have been moved when
1249 # Informs the user that phases should have been moved when
1250 # applicable.
1250 # applicable.
1251 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1251 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1252 phasestr = phases.phasenames[phase]
1252 phasestr = phases.phasenames[phase]
1253 if actualmoves:
1253 if actualmoves:
1254 pushop.ui.status(_('cannot lock source repo, skipping '
1254 pushop.ui.status(_('cannot lock source repo, skipping '
1255 'local %s phase update\n') % phasestr)
1255 'local %s phase update\n') % phasestr)
1256
1256
1257 def _pushobsolete(pushop):
1257 def _pushobsolete(pushop):
1258 """utility function to push obsolete markers to a remote"""
1258 """utility function to push obsolete markers to a remote"""
1259 if 'obsmarkers' in pushop.stepsdone:
1259 if 'obsmarkers' in pushop.stepsdone:
1260 return
1260 return
1261 repo = pushop.repo
1261 repo = pushop.repo
1262 remote = pushop.remote
1262 remote = pushop.remote
1263 pushop.stepsdone.add('obsmarkers')
1263 pushop.stepsdone.add('obsmarkers')
1264 if pushop.outobsmarkers:
1264 if pushop.outobsmarkers:
1265 pushop.ui.debug('try to push obsolete markers to remote\n')
1265 pushop.ui.debug('try to push obsolete markers to remote\n')
1266 rslts = []
1266 rslts = []
1267 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1267 remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
1268 for key in sorted(remotedata, reverse=True):
1268 for key in sorted(remotedata, reverse=True):
1269 # reverse sort to ensure we end with dump0
1269 # reverse sort to ensure we end with dump0
1270 data = remotedata[key]
1270 data = remotedata[key]
1271 rslts.append(remote.pushkey('obsolete', key, '', data))
1271 rslts.append(remote.pushkey('obsolete', key, '', data))
1272 if [r for r in rslts if not r]:
1272 if [r for r in rslts if not r]:
1273 msg = _('failed to push some obsolete markers!\n')
1273 msg = _('failed to push some obsolete markers!\n')
1274 repo.ui.warn(msg)
1274 repo.ui.warn(msg)
1275
1275
1276 def _pushbookmark(pushop):
1276 def _pushbookmark(pushop):
1277 """Update bookmark position on remote"""
1277 """Update bookmark position on remote"""
1278 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1278 if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
1279 return
1279 return
1280 pushop.stepsdone.add('bookmarks')
1280 pushop.stepsdone.add('bookmarks')
1281 ui = pushop.ui
1281 ui = pushop.ui
1282 remote = pushop.remote
1282 remote = pushop.remote
1283
1283
1284 for b, old, new in pushop.outbookmarks:
1284 for b, old, new in pushop.outbookmarks:
1285 action = 'update'
1285 action = 'update'
1286 if not old:
1286 if not old:
1287 action = 'export'
1287 action = 'export'
1288 elif not new:
1288 elif not new:
1289 action = 'delete'
1289 action = 'delete'
1290
1290
1291 with remote.commandexecutor() as e:
1291 with remote.commandexecutor() as e:
1292 r = e.callcommand('pushkey', {
1292 r = e.callcommand('pushkey', {
1293 'namespace': 'bookmarks',
1293 'namespace': 'bookmarks',
1294 'key': b,
1294 'key': b,
1295 'old': old,
1295 'old': old,
1296 'new': new,
1296 'new': new,
1297 }).result()
1297 }).result()
1298
1298
1299 if r:
1299 if r:
1300 ui.status(bookmsgmap[action][0] % b)
1300 ui.status(bookmsgmap[action][0] % b)
1301 else:
1301 else:
1302 ui.warn(bookmsgmap[action][1] % b)
1302 ui.warn(bookmsgmap[action][1] % b)
1303 # discovery can have set the value form invalid entry
1303 # discovery can have set the value form invalid entry
1304 if pushop.bkresult is not None:
1304 if pushop.bkresult is not None:
1305 pushop.bkresult = 1
1305 pushop.bkresult = 1
1306
1306
1307 class pulloperation(object):
1307 class pulloperation(object):
1308 """A object that represent a single pull operation
1308 """A object that represent a single pull operation
1309
1309
1310 It purpose is to carry pull related state and very common operation.
1310 It purpose is to carry pull related state and very common operation.
1311
1311
1312 A new should be created at the beginning of each pull and discarded
1312 A new should be created at the beginning of each pull and discarded
1313 afterward.
1313 afterward.
1314 """
1314 """
1315
1315
1316 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1316 def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
1317 remotebookmarks=None, streamclonerequested=None,
1317 remotebookmarks=None, streamclonerequested=None,
1318 includepats=None, excludepats=None):
1318 includepats=None, excludepats=None):
1319 # repo we pull into
1319 # repo we pull into
1320 self.repo = repo
1320 self.repo = repo
1321 # repo we pull from
1321 # repo we pull from
1322 self.remote = remote
1322 self.remote = remote
1323 # revision we try to pull (None is "all")
1323 # revision we try to pull (None is "all")
1324 self.heads = heads
1324 self.heads = heads
1325 # bookmark pulled explicitly
1325 # bookmark pulled explicitly
1326 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1326 self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
1327 for bookmark in bookmarks]
1327 for bookmark in bookmarks]
1328 # do we force pull?
1328 # do we force pull?
1329 self.force = force
1329 self.force = force
1330 # whether a streaming clone was requested
1330 # whether a streaming clone was requested
1331 self.streamclonerequested = streamclonerequested
1331 self.streamclonerequested = streamclonerequested
1332 # transaction manager
1332 # transaction manager
1333 self.trmanager = None
1333 self.trmanager = None
1334 # set of common changeset between local and remote before pull
1334 # set of common changeset between local and remote before pull
1335 self.common = None
1335 self.common = None
1336 # set of pulled head
1336 # set of pulled head
1337 self.rheads = None
1337 self.rheads = None
1338 # list of missing changeset to fetch remotely
1338 # list of missing changeset to fetch remotely
1339 self.fetch = None
1339 self.fetch = None
1340 # remote bookmarks data
1340 # remote bookmarks data
1341 self.remotebookmarks = remotebookmarks
1341 self.remotebookmarks = remotebookmarks
1342 # result of changegroup pulling (used as return code by pull)
1342 # result of changegroup pulling (used as return code by pull)
1343 self.cgresult = None
1343 self.cgresult = None
1344 # list of step already done
1344 # list of step already done
1345 self.stepsdone = set()
1345 self.stepsdone = set()
1346 # Whether we attempted a clone from pre-generated bundles.
1346 # Whether we attempted a clone from pre-generated bundles.
1347 self.clonebundleattempted = False
1347 self.clonebundleattempted = False
1348 # Set of file patterns to include.
1348 # Set of file patterns to include.
1349 self.includepats = includepats
1349 self.includepats = includepats
1350 # Set of file patterns to exclude.
1350 # Set of file patterns to exclude.
1351 self.excludepats = excludepats
1351 self.excludepats = excludepats
1352
1352
1353 @util.propertycache
1353 @util.propertycache
1354 def pulledsubset(self):
1354 def pulledsubset(self):
1355 """heads of the set of changeset target by the pull"""
1355 """heads of the set of changeset target by the pull"""
1356 # compute target subset
1356 # compute target subset
1357 if self.heads is None:
1357 if self.heads is None:
1358 # We pulled every thing possible
1358 # We pulled every thing possible
1359 # sync on everything common
1359 # sync on everything common
1360 c = set(self.common)
1360 c = set(self.common)
1361 ret = list(self.common)
1361 ret = list(self.common)
1362 for n in self.rheads:
1362 for n in self.rheads:
1363 if n not in c:
1363 if n not in c:
1364 ret.append(n)
1364 ret.append(n)
1365 return ret
1365 return ret
1366 else:
1366 else:
1367 # We pulled a specific subset
1367 # We pulled a specific subset
1368 # sync on this subset
1368 # sync on this subset
1369 return self.heads
1369 return self.heads
1370
1370
1371 @util.propertycache
1371 @util.propertycache
1372 def canusebundle2(self):
1372 def canusebundle2(self):
1373 return not _forcebundle1(self)
1373 return not _forcebundle1(self)
1374
1374
1375 @util.propertycache
1375 @util.propertycache
1376 def remotebundle2caps(self):
1376 def remotebundle2caps(self):
1377 return bundle2.bundle2caps(self.remote)
1377 return bundle2.bundle2caps(self.remote)
1378
1378
1379 def gettransaction(self):
1379 def gettransaction(self):
1380 # deprecated; talk to trmanager directly
1380 # deprecated; talk to trmanager directly
1381 return self.trmanager.transaction()
1381 return self.trmanager.transaction()
1382
1382
1383 class transactionmanager(util.transactional):
1383 class transactionmanager(util.transactional):
1384 """An object to manage the life cycle of a transaction
1384 """An object to manage the life cycle of a transaction
1385
1385
1386 It creates the transaction on demand and calls the appropriate hooks when
1386 It creates the transaction on demand and calls the appropriate hooks when
1387 closing the transaction."""
1387 closing the transaction."""
1388 def __init__(self, repo, source, url):
1388 def __init__(self, repo, source, url):
1389 self.repo = repo
1389 self.repo = repo
1390 self.source = source
1390 self.source = source
1391 self.url = url
1391 self.url = url
1392 self._tr = None
1392 self._tr = None
1393
1393
1394 def transaction(self):
1394 def transaction(self):
1395 """Return an open transaction object, constructing if necessary"""
1395 """Return an open transaction object, constructing if necessary"""
1396 if not self._tr:
1396 if not self._tr:
1397 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1397 trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
1398 self._tr = self.repo.transaction(trname)
1398 self._tr = self.repo.transaction(trname)
1399 self._tr.hookargs['source'] = self.source
1399 self._tr.hookargs['source'] = self.source
1400 self._tr.hookargs['url'] = self.url
1400 self._tr.hookargs['url'] = self.url
1401 return self._tr
1401 return self._tr
1402
1402
1403 def close(self):
1403 def close(self):
1404 """close transaction if created"""
1404 """close transaction if created"""
1405 if self._tr is not None:
1405 if self._tr is not None:
1406 self._tr.close()
1406 self._tr.close()
1407
1407
1408 def release(self):
1408 def release(self):
1409 """release transaction if created"""
1409 """release transaction if created"""
1410 if self._tr is not None:
1410 if self._tr is not None:
1411 self._tr.release()
1411 self._tr.release()
1412
1412
1413 def listkeys(remote, namespace):
1413 def listkeys(remote, namespace):
1414 with remote.commandexecutor() as e:
1414 with remote.commandexecutor() as e:
1415 return e.callcommand('listkeys', {'namespace': namespace}).result()
1415 return e.callcommand('listkeys', {'namespace': namespace}).result()
1416
1416
1417 def _fullpullbundle2(repo, pullop):
1417 def _fullpullbundle2(repo, pullop):
1418 # The server may send a partial reply, i.e. when inlining
1418 # The server may send a partial reply, i.e. when inlining
1419 # pre-computed bundles. In that case, update the common
1419 # pre-computed bundles. In that case, update the common
1420 # set based on the results and pull another bundle.
1420 # set based on the results and pull another bundle.
1421 #
1421 #
1422 # There are two indicators that the process is finished:
1422 # There are two indicators that the process is finished:
1423 # - no changeset has been added, or
1423 # - no changeset has been added, or
1424 # - all remote heads are known locally.
1424 # - all remote heads are known locally.
1425 # The head check must use the unfiltered view as obsoletion
1425 # The head check must use the unfiltered view as obsoletion
1426 # markers can hide heads.
1426 # markers can hide heads.
1427 unfi = repo.unfiltered()
1427 unfi = repo.unfiltered()
1428 unficl = unfi.changelog
1428 unficl = unfi.changelog
1429 def headsofdiff(h1, h2):
1429 def headsofdiff(h1, h2):
1430 """Returns heads(h1 % h2)"""
1430 """Returns heads(h1 % h2)"""
1431 res = unfi.set('heads(%ln %% %ln)', h1, h2)
1431 res = unfi.set('heads(%ln %% %ln)', h1, h2)
1432 return set(ctx.node() for ctx in res)
1432 return set(ctx.node() for ctx in res)
1433 def headsofunion(h1, h2):
1433 def headsofunion(h1, h2):
1434 """Returns heads((h1 + h2) - null)"""
1434 """Returns heads((h1 + h2) - null)"""
1435 res = unfi.set('heads((%ln + %ln - null))', h1, h2)
1435 res = unfi.set('heads((%ln + %ln - null))', h1, h2)
1436 return set(ctx.node() for ctx in res)
1436 return set(ctx.node() for ctx in res)
1437 while True:
1437 while True:
1438 old_heads = unficl.heads()
1438 old_heads = unficl.heads()
1439 clstart = len(unficl)
1439 clstart = len(unficl)
1440 _pullbundle2(pullop)
1440 _pullbundle2(pullop)
1441 if repository.NARROW_REQUIREMENT in repo.requirements:
1441 if repository.NARROW_REQUIREMENT in repo.requirements:
1442 # XXX narrow clones filter the heads on the server side during
1442 # XXX narrow clones filter the heads on the server side during
1443 # XXX getbundle and result in partial replies as well.
1443 # XXX getbundle and result in partial replies as well.
1444 # XXX Disable pull bundles in this case as band aid to avoid
1444 # XXX Disable pull bundles in this case as band aid to avoid
1445 # XXX extra round trips.
1445 # XXX extra round trips.
1446 break
1446 break
1447 if clstart == len(unficl):
1447 if clstart == len(unficl):
1448 break
1448 break
1449 if all(unficl.hasnode(n) for n in pullop.rheads):
1449 if all(unficl.hasnode(n) for n in pullop.rheads):
1450 break
1450 break
1451 new_heads = headsofdiff(unficl.heads(), old_heads)
1451 new_heads = headsofdiff(unficl.heads(), old_heads)
1452 pullop.common = headsofunion(new_heads, pullop.common)
1452 pullop.common = headsofunion(new_heads, pullop.common)
1453 pullop.rheads = set(pullop.rheads) - pullop.common
1453 pullop.rheads = set(pullop.rheads) - pullop.common
1454
1454
1455 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1455 def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
1456 streamclonerequested=None, includepats=None, excludepats=None):
1456 streamclonerequested=None, includepats=None, excludepats=None):
1457 """Fetch repository data from a remote.
1457 """Fetch repository data from a remote.
1458
1458
1459 This is the main function used to retrieve data from a remote repository.
1459 This is the main function used to retrieve data from a remote repository.
1460
1460
1461 ``repo`` is the local repository to clone into.
1461 ``repo`` is the local repository to clone into.
1462 ``remote`` is a peer instance.
1462 ``remote`` is a peer instance.
1463 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1463 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1464 default) means to pull everything from the remote.
1464 default) means to pull everything from the remote.
1465 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1465 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1466 default, all remote bookmarks are pulled.
1466 default, all remote bookmarks are pulled.
1467 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1467 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1468 initialization.
1468 initialization.
1469 ``streamclonerequested`` is a boolean indicating whether a "streaming
1469 ``streamclonerequested`` is a boolean indicating whether a "streaming
1470 clone" is requested. A "streaming clone" is essentially a raw file copy
1470 clone" is requested. A "streaming clone" is essentially a raw file copy
1471 of revlogs from the server. This only works when the local repository is
1471 of revlogs from the server. This only works when the local repository is
1472 empty. The default value of ``None`` means to respect the server
1472 empty. The default value of ``None`` means to respect the server
1473 configuration for preferring stream clones.
1473 configuration for preferring stream clones.
1474 ``includepats`` and ``excludepats`` define explicit file patterns to
1474 ``includepats`` and ``excludepats`` define explicit file patterns to
1475 include and exclude in storage, respectively. If not defined, narrow
1475 include and exclude in storage, respectively. If not defined, narrow
1476 patterns from the repo instance are used, if available.
1476 patterns from the repo instance are used, if available.
1477
1477
1478 Returns the ``pulloperation`` created for this pull.
1478 Returns the ``pulloperation`` created for this pull.
1479 """
1479 """
1480 if opargs is None:
1480 if opargs is None:
1481 opargs = {}
1481 opargs = {}
1482
1482
1483 # We allow the narrow patterns to be passed in explicitly to provide more
1483 # We allow the narrow patterns to be passed in explicitly to provide more
1484 # flexibility for API consumers.
1484 # flexibility for API consumers.
1485 if includepats or excludepats:
1485 if includepats or excludepats:
1486 includepats = includepats or set()
1486 includepats = includepats or set()
1487 excludepats = excludepats or set()
1487 excludepats = excludepats or set()
1488 else:
1488 else:
1489 includepats, excludepats = repo.narrowpats
1489 includepats, excludepats = repo.narrowpats
1490
1490
1491 narrowspec.validatepatterns(includepats)
1491 narrowspec.validatepatterns(includepats)
1492 narrowspec.validatepatterns(excludepats)
1492 narrowspec.validatepatterns(excludepats)
1493
1493
1494 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1494 pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
1495 streamclonerequested=streamclonerequested,
1495 streamclonerequested=streamclonerequested,
1496 includepats=includepats, excludepats=excludepats,
1496 includepats=includepats, excludepats=excludepats,
1497 **pycompat.strkwargs(opargs))
1497 **pycompat.strkwargs(opargs))
1498
1498
1499 peerlocal = pullop.remote.local()
1499 peerlocal = pullop.remote.local()
1500 if peerlocal:
1500 if peerlocal:
1501 missing = set(peerlocal.requirements) - pullop.repo.supported
1501 missing = set(peerlocal.requirements) - pullop.repo.supported
1502 if missing:
1502 if missing:
1503 msg = _("required features are not"
1503 msg = _("required features are not"
1504 " supported in the destination:"
1504 " supported in the destination:"
1505 " %s") % (', '.join(sorted(missing)))
1505 " %s") % (', '.join(sorted(missing)))
1506 raise error.Abort(msg)
1506 raise error.Abort(msg)
1507
1507
1508 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1508 pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
1509 with repo.wlock(), repo.lock(), pullop.trmanager:
1509 with repo.wlock(), repo.lock(), pullop.trmanager:
1510 # Use the modern wire protocol, if available.
1510 # Use the modern wire protocol, if available.
1511 if remote.capable('exchangev2'):
1511 if remote.capable('command-changesetdata'):
1512 exchangev2.pull(pullop)
1512 exchangev2.pull(pullop)
1513 else:
1513 else:
1514 # This should ideally be in _pullbundle2(). However, it needs to run
1514 # This should ideally be in _pullbundle2(). However, it needs to run
1515 # before discovery to avoid extra work.
1515 # before discovery to avoid extra work.
1516 _maybeapplyclonebundle(pullop)
1516 _maybeapplyclonebundle(pullop)
1517 streamclone.maybeperformlegacystreamclone(pullop)
1517 streamclone.maybeperformlegacystreamclone(pullop)
1518 _pulldiscovery(pullop)
1518 _pulldiscovery(pullop)
1519 if pullop.canusebundle2:
1519 if pullop.canusebundle2:
1520 _fullpullbundle2(repo, pullop)
1520 _fullpullbundle2(repo, pullop)
1521 _pullchangeset(pullop)
1521 _pullchangeset(pullop)
1522 _pullphase(pullop)
1522 _pullphase(pullop)
1523 _pullbookmarks(pullop)
1523 _pullbookmarks(pullop)
1524 _pullobsolete(pullop)
1524 _pullobsolete(pullop)
1525
1525
1526 # storing remotenames
1526 # storing remotenames
1527 if repo.ui.configbool('experimental', 'remotenames'):
1527 if repo.ui.configbool('experimental', 'remotenames'):
1528 logexchange.pullremotenames(repo, remote)
1528 logexchange.pullremotenames(repo, remote)
1529
1529
1530 return pullop
1530 return pullop
1531
1531
1532 # list of steps to perform discovery before pull
1532 # list of steps to perform discovery before pull
1533 pulldiscoveryorder = []
1533 pulldiscoveryorder = []
1534
1534
1535 # Mapping between step name and function
1535 # Mapping between step name and function
1536 #
1536 #
1537 # This exists to help extensions wrap steps if necessary
1537 # This exists to help extensions wrap steps if necessary
1538 pulldiscoverymapping = {}
1538 pulldiscoverymapping = {}
1539
1539
1540 def pulldiscovery(stepname):
1540 def pulldiscovery(stepname):
1541 """decorator for function performing discovery before pull
1541 """decorator for function performing discovery before pull
1542
1542
1543 The function is added to the step -> function mapping and appended to the
1543 The function is added to the step -> function mapping and appended to the
1544 list of steps. Beware that decorated function will be added in order (this
1544 list of steps. Beware that decorated function will be added in order (this
1545 may matter).
1545 may matter).
1546
1546
1547 You can only use this decorator for a new step, if you want to wrap a step
1547 You can only use this decorator for a new step, if you want to wrap a step
1548 from an extension, change the pulldiscovery dictionary directly."""
1548 from an extension, change the pulldiscovery dictionary directly."""
1549 def dec(func):
1549 def dec(func):
1550 assert stepname not in pulldiscoverymapping
1550 assert stepname not in pulldiscoverymapping
1551 pulldiscoverymapping[stepname] = func
1551 pulldiscoverymapping[stepname] = func
1552 pulldiscoveryorder.append(stepname)
1552 pulldiscoveryorder.append(stepname)
1553 return func
1553 return func
1554 return dec
1554 return dec
1555
1555
1556 def _pulldiscovery(pullop):
1556 def _pulldiscovery(pullop):
1557 """Run all discovery steps"""
1557 """Run all discovery steps"""
1558 for stepname in pulldiscoveryorder:
1558 for stepname in pulldiscoveryorder:
1559 step = pulldiscoverymapping[stepname]
1559 step = pulldiscoverymapping[stepname]
1560 step(pullop)
1560 step(pullop)
1561
1561
1562 @pulldiscovery('b1:bookmarks')
1562 @pulldiscovery('b1:bookmarks')
1563 def _pullbookmarkbundle1(pullop):
1563 def _pullbookmarkbundle1(pullop):
1564 """fetch bookmark data in bundle1 case
1564 """fetch bookmark data in bundle1 case
1565
1565
1566 If not using bundle2, we have to fetch bookmarks before changeset
1566 If not using bundle2, we have to fetch bookmarks before changeset
1567 discovery to reduce the chance and impact of race conditions."""
1567 discovery to reduce the chance and impact of race conditions."""
1568 if pullop.remotebookmarks is not None:
1568 if pullop.remotebookmarks is not None:
1569 return
1569 return
1570 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1570 if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
1571 # all known bundle2 servers now support listkeys, but lets be nice with
1571 # all known bundle2 servers now support listkeys, but lets be nice with
1572 # new implementation.
1572 # new implementation.
1573 return
1573 return
1574 books = listkeys(pullop.remote, 'bookmarks')
1574 books = listkeys(pullop.remote, 'bookmarks')
1575 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1575 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1576
1576
1577
1577
1578 @pulldiscovery('changegroup')
1578 @pulldiscovery('changegroup')
1579 def _pulldiscoverychangegroup(pullop):
1579 def _pulldiscoverychangegroup(pullop):
1580 """discovery phase for the pull
1580 """discovery phase for the pull
1581
1581
1582 Current handle changeset discovery only, will change handle all discovery
1582 Current handle changeset discovery only, will change handle all discovery
1583 at some point."""
1583 at some point."""
1584 tmp = discovery.findcommonincoming(pullop.repo,
1584 tmp = discovery.findcommonincoming(pullop.repo,
1585 pullop.remote,
1585 pullop.remote,
1586 heads=pullop.heads,
1586 heads=pullop.heads,
1587 force=pullop.force)
1587 force=pullop.force)
1588 common, fetch, rheads = tmp
1588 common, fetch, rheads = tmp
1589 nm = pullop.repo.unfiltered().changelog.nodemap
1589 nm = pullop.repo.unfiltered().changelog.nodemap
1590 if fetch and rheads:
1590 if fetch and rheads:
1591 # If a remote heads is filtered locally, put in back in common.
1591 # If a remote heads is filtered locally, put in back in common.
1592 #
1592 #
1593 # This is a hackish solution to catch most of "common but locally
1593 # This is a hackish solution to catch most of "common but locally
1594 # hidden situation". We do not performs discovery on unfiltered
1594 # hidden situation". We do not performs discovery on unfiltered
1595 # repository because it end up doing a pathological amount of round
1595 # repository because it end up doing a pathological amount of round
1596 # trip for w huge amount of changeset we do not care about.
1596 # trip for w huge amount of changeset we do not care about.
1597 #
1597 #
1598 # If a set of such "common but filtered" changeset exist on the server
1598 # If a set of such "common but filtered" changeset exist on the server
1599 # but are not including a remote heads, we'll not be able to detect it,
1599 # but are not including a remote heads, we'll not be able to detect it,
1600 scommon = set(common)
1600 scommon = set(common)
1601 for n in rheads:
1601 for n in rheads:
1602 if n in nm:
1602 if n in nm:
1603 if n not in scommon:
1603 if n not in scommon:
1604 common.append(n)
1604 common.append(n)
1605 if set(rheads).issubset(set(common)):
1605 if set(rheads).issubset(set(common)):
1606 fetch = []
1606 fetch = []
1607 pullop.common = common
1607 pullop.common = common
1608 pullop.fetch = fetch
1608 pullop.fetch = fetch
1609 pullop.rheads = rheads
1609 pullop.rheads = rheads
1610
1610
1611 def _pullbundle2(pullop):
1611 def _pullbundle2(pullop):
1612 """pull data using bundle2
1612 """pull data using bundle2
1613
1613
1614 For now, the only supported data are changegroup."""
1614 For now, the only supported data are changegroup."""
1615 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1615 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1616
1616
1617 # make ui easier to access
1617 # make ui easier to access
1618 ui = pullop.repo.ui
1618 ui = pullop.repo.ui
1619
1619
1620 # At the moment we don't do stream clones over bundle2. If that is
1620 # At the moment we don't do stream clones over bundle2. If that is
1621 # implemented then here's where the check for that will go.
1621 # implemented then here's where the check for that will go.
1622 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1622 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1623
1623
1624 # declare pull perimeters
1624 # declare pull perimeters
1625 kwargs['common'] = pullop.common
1625 kwargs['common'] = pullop.common
1626 kwargs['heads'] = pullop.heads or pullop.rheads
1626 kwargs['heads'] = pullop.heads or pullop.rheads
1627
1627
1628 if streaming:
1628 if streaming:
1629 kwargs['cg'] = False
1629 kwargs['cg'] = False
1630 kwargs['stream'] = True
1630 kwargs['stream'] = True
1631 pullop.stepsdone.add('changegroup')
1631 pullop.stepsdone.add('changegroup')
1632 pullop.stepsdone.add('phases')
1632 pullop.stepsdone.add('phases')
1633
1633
1634 else:
1634 else:
1635 # pulling changegroup
1635 # pulling changegroup
1636 pullop.stepsdone.add('changegroup')
1636 pullop.stepsdone.add('changegroup')
1637
1637
1638 kwargs['cg'] = pullop.fetch
1638 kwargs['cg'] = pullop.fetch
1639
1639
1640 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1640 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1641 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1641 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1642 if (not legacyphase and hasbinaryphase):
1642 if (not legacyphase and hasbinaryphase):
1643 kwargs['phases'] = True
1643 kwargs['phases'] = True
1644 pullop.stepsdone.add('phases')
1644 pullop.stepsdone.add('phases')
1645
1645
1646 if 'listkeys' in pullop.remotebundle2caps:
1646 if 'listkeys' in pullop.remotebundle2caps:
1647 if 'phases' not in pullop.stepsdone:
1647 if 'phases' not in pullop.stepsdone:
1648 kwargs['listkeys'] = ['phases']
1648 kwargs['listkeys'] = ['phases']
1649
1649
1650 bookmarksrequested = False
1650 bookmarksrequested = False
1651 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1651 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1652 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1652 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1653
1653
1654 if pullop.remotebookmarks is not None:
1654 if pullop.remotebookmarks is not None:
1655 pullop.stepsdone.add('request-bookmarks')
1655 pullop.stepsdone.add('request-bookmarks')
1656
1656
1657 if ('request-bookmarks' not in pullop.stepsdone
1657 if ('request-bookmarks' not in pullop.stepsdone
1658 and pullop.remotebookmarks is None
1658 and pullop.remotebookmarks is None
1659 and not legacybookmark and hasbinarybook):
1659 and not legacybookmark and hasbinarybook):
1660 kwargs['bookmarks'] = True
1660 kwargs['bookmarks'] = True
1661 bookmarksrequested = True
1661 bookmarksrequested = True
1662
1662
1663 if 'listkeys' in pullop.remotebundle2caps:
1663 if 'listkeys' in pullop.remotebundle2caps:
1664 if 'request-bookmarks' not in pullop.stepsdone:
1664 if 'request-bookmarks' not in pullop.stepsdone:
1665 # make sure to always includes bookmark data when migrating
1665 # make sure to always includes bookmark data when migrating
1666 # `hg incoming --bundle` to using this function.
1666 # `hg incoming --bundle` to using this function.
1667 pullop.stepsdone.add('request-bookmarks')
1667 pullop.stepsdone.add('request-bookmarks')
1668 kwargs.setdefault('listkeys', []).append('bookmarks')
1668 kwargs.setdefault('listkeys', []).append('bookmarks')
1669
1669
1670 # If this is a full pull / clone and the server supports the clone bundles
1670 # If this is a full pull / clone and the server supports the clone bundles
1671 # feature, tell the server whether we attempted a clone bundle. The
1671 # feature, tell the server whether we attempted a clone bundle. The
1672 # presence of this flag indicates the client supports clone bundles. This
1672 # presence of this flag indicates the client supports clone bundles. This
1673 # will enable the server to treat clients that support clone bundles
1673 # will enable the server to treat clients that support clone bundles
1674 # differently from those that don't.
1674 # differently from those that don't.
1675 if (pullop.remote.capable('clonebundles')
1675 if (pullop.remote.capable('clonebundles')
1676 and pullop.heads is None and list(pullop.common) == [nullid]):
1676 and pullop.heads is None and list(pullop.common) == [nullid]):
1677 kwargs['cbattempted'] = pullop.clonebundleattempted
1677 kwargs['cbattempted'] = pullop.clonebundleattempted
1678
1678
1679 if streaming:
1679 if streaming:
1680 pullop.repo.ui.status(_('streaming all changes\n'))
1680 pullop.repo.ui.status(_('streaming all changes\n'))
1681 elif not pullop.fetch:
1681 elif not pullop.fetch:
1682 pullop.repo.ui.status(_("no changes found\n"))
1682 pullop.repo.ui.status(_("no changes found\n"))
1683 pullop.cgresult = 0
1683 pullop.cgresult = 0
1684 else:
1684 else:
1685 if pullop.heads is None and list(pullop.common) == [nullid]:
1685 if pullop.heads is None and list(pullop.common) == [nullid]:
1686 pullop.repo.ui.status(_("requesting all changes\n"))
1686 pullop.repo.ui.status(_("requesting all changes\n"))
1687 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1687 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1688 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1688 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1689 if obsolete.commonversion(remoteversions) is not None:
1689 if obsolete.commonversion(remoteversions) is not None:
1690 kwargs['obsmarkers'] = True
1690 kwargs['obsmarkers'] = True
1691 pullop.stepsdone.add('obsmarkers')
1691 pullop.stepsdone.add('obsmarkers')
1692 _pullbundle2extraprepare(pullop, kwargs)
1692 _pullbundle2extraprepare(pullop, kwargs)
1693
1693
1694 with pullop.remote.commandexecutor() as e:
1694 with pullop.remote.commandexecutor() as e:
1695 args = dict(kwargs)
1695 args = dict(kwargs)
1696 args['source'] = 'pull'
1696 args['source'] = 'pull'
1697 bundle = e.callcommand('getbundle', args).result()
1697 bundle = e.callcommand('getbundle', args).result()
1698
1698
1699 try:
1699 try:
1700 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
1700 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
1701 source='pull')
1701 source='pull')
1702 op.modes['bookmarks'] = 'records'
1702 op.modes['bookmarks'] = 'records'
1703 bundle2.processbundle(pullop.repo, bundle, op=op)
1703 bundle2.processbundle(pullop.repo, bundle, op=op)
1704 except bundle2.AbortFromPart as exc:
1704 except bundle2.AbortFromPart as exc:
1705 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1705 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1706 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1706 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1707 except error.BundleValueError as exc:
1707 except error.BundleValueError as exc:
1708 raise error.Abort(_('missing support for %s') % exc)
1708 raise error.Abort(_('missing support for %s') % exc)
1709
1709
1710 if pullop.fetch:
1710 if pullop.fetch:
1711 pullop.cgresult = bundle2.combinechangegroupresults(op)
1711 pullop.cgresult = bundle2.combinechangegroupresults(op)
1712
1712
1713 # processing phases change
1713 # processing phases change
1714 for namespace, value in op.records['listkeys']:
1714 for namespace, value in op.records['listkeys']:
1715 if namespace == 'phases':
1715 if namespace == 'phases':
1716 _pullapplyphases(pullop, value)
1716 _pullapplyphases(pullop, value)
1717
1717
1718 # processing bookmark update
1718 # processing bookmark update
1719 if bookmarksrequested:
1719 if bookmarksrequested:
1720 books = {}
1720 books = {}
1721 for record in op.records['bookmarks']:
1721 for record in op.records['bookmarks']:
1722 books[record['bookmark']] = record["node"]
1722 books[record['bookmark']] = record["node"]
1723 pullop.remotebookmarks = books
1723 pullop.remotebookmarks = books
1724 else:
1724 else:
1725 for namespace, value in op.records['listkeys']:
1725 for namespace, value in op.records['listkeys']:
1726 if namespace == 'bookmarks':
1726 if namespace == 'bookmarks':
1727 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1727 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1728
1728
1729 # bookmark data were either already there or pulled in the bundle
1729 # bookmark data were either already there or pulled in the bundle
1730 if pullop.remotebookmarks is not None:
1730 if pullop.remotebookmarks is not None:
1731 _pullbookmarks(pullop)
1731 _pullbookmarks(pullop)
1732
1732
1733 def _pullbundle2extraprepare(pullop, kwargs):
1733 def _pullbundle2extraprepare(pullop, kwargs):
1734 """hook function so that extensions can extend the getbundle call"""
1734 """hook function so that extensions can extend the getbundle call"""
1735
1735
1736 def _pullchangeset(pullop):
1736 def _pullchangeset(pullop):
1737 """pull changeset from unbundle into the local repo"""
1737 """pull changeset from unbundle into the local repo"""
1738 # We delay the open of the transaction as late as possible so we
1738 # We delay the open of the transaction as late as possible so we
1739 # don't open transaction for nothing or you break future useful
1739 # don't open transaction for nothing or you break future useful
1740 # rollback call
1740 # rollback call
1741 if 'changegroup' in pullop.stepsdone:
1741 if 'changegroup' in pullop.stepsdone:
1742 return
1742 return
1743 pullop.stepsdone.add('changegroup')
1743 pullop.stepsdone.add('changegroup')
1744 if not pullop.fetch:
1744 if not pullop.fetch:
1745 pullop.repo.ui.status(_("no changes found\n"))
1745 pullop.repo.ui.status(_("no changes found\n"))
1746 pullop.cgresult = 0
1746 pullop.cgresult = 0
1747 return
1747 return
1748 tr = pullop.gettransaction()
1748 tr = pullop.gettransaction()
1749 if pullop.heads is None and list(pullop.common) == [nullid]:
1749 if pullop.heads is None and list(pullop.common) == [nullid]:
1750 pullop.repo.ui.status(_("requesting all changes\n"))
1750 pullop.repo.ui.status(_("requesting all changes\n"))
1751 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1751 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1752 # issue1320, avoid a race if remote changed after discovery
1752 # issue1320, avoid a race if remote changed after discovery
1753 pullop.heads = pullop.rheads
1753 pullop.heads = pullop.rheads
1754
1754
1755 if pullop.remote.capable('getbundle'):
1755 if pullop.remote.capable('getbundle'):
1756 # TODO: get bundlecaps from remote
1756 # TODO: get bundlecaps from remote
1757 cg = pullop.remote.getbundle('pull', common=pullop.common,
1757 cg = pullop.remote.getbundle('pull', common=pullop.common,
1758 heads=pullop.heads or pullop.rheads)
1758 heads=pullop.heads or pullop.rheads)
1759 elif pullop.heads is None:
1759 elif pullop.heads is None:
1760 with pullop.remote.commandexecutor() as e:
1760 with pullop.remote.commandexecutor() as e:
1761 cg = e.callcommand('changegroup', {
1761 cg = e.callcommand('changegroup', {
1762 'nodes': pullop.fetch,
1762 'nodes': pullop.fetch,
1763 'source': 'pull',
1763 'source': 'pull',
1764 }).result()
1764 }).result()
1765
1765
1766 elif not pullop.remote.capable('changegroupsubset'):
1766 elif not pullop.remote.capable('changegroupsubset'):
1767 raise error.Abort(_("partial pull cannot be done because "
1767 raise error.Abort(_("partial pull cannot be done because "
1768 "other repository doesn't support "
1768 "other repository doesn't support "
1769 "changegroupsubset."))
1769 "changegroupsubset."))
1770 else:
1770 else:
1771 with pullop.remote.commandexecutor() as e:
1771 with pullop.remote.commandexecutor() as e:
1772 cg = e.callcommand('changegroupsubset', {
1772 cg = e.callcommand('changegroupsubset', {
1773 'bases': pullop.fetch,
1773 'bases': pullop.fetch,
1774 'heads': pullop.heads,
1774 'heads': pullop.heads,
1775 'source': 'pull',
1775 'source': 'pull',
1776 }).result()
1776 }).result()
1777
1777
1778 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1778 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1779 pullop.remote.url())
1779 pullop.remote.url())
1780 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1780 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1781
1781
1782 def _pullphase(pullop):
1782 def _pullphase(pullop):
1783 # Get remote phases data from remote
1783 # Get remote phases data from remote
1784 if 'phases' in pullop.stepsdone:
1784 if 'phases' in pullop.stepsdone:
1785 return
1785 return
1786 remotephases = listkeys(pullop.remote, 'phases')
1786 remotephases = listkeys(pullop.remote, 'phases')
1787 _pullapplyphases(pullop, remotephases)
1787 _pullapplyphases(pullop, remotephases)
1788
1788
1789 def _pullapplyphases(pullop, remotephases):
1789 def _pullapplyphases(pullop, remotephases):
1790 """apply phase movement from observed remote state"""
1790 """apply phase movement from observed remote state"""
1791 if 'phases' in pullop.stepsdone:
1791 if 'phases' in pullop.stepsdone:
1792 return
1792 return
1793 pullop.stepsdone.add('phases')
1793 pullop.stepsdone.add('phases')
1794 publishing = bool(remotephases.get('publishing', False))
1794 publishing = bool(remotephases.get('publishing', False))
1795 if remotephases and not publishing:
1795 if remotephases and not publishing:
1796 # remote is new and non-publishing
1796 # remote is new and non-publishing
1797 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1797 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1798 pullop.pulledsubset,
1798 pullop.pulledsubset,
1799 remotephases)
1799 remotephases)
1800 dheads = pullop.pulledsubset
1800 dheads = pullop.pulledsubset
1801 else:
1801 else:
1802 # Remote is old or publishing all common changesets
1802 # Remote is old or publishing all common changesets
1803 # should be seen as public
1803 # should be seen as public
1804 pheads = pullop.pulledsubset
1804 pheads = pullop.pulledsubset
1805 dheads = []
1805 dheads = []
1806 unfi = pullop.repo.unfiltered()
1806 unfi = pullop.repo.unfiltered()
1807 phase = unfi._phasecache.phase
1807 phase = unfi._phasecache.phase
1808 rev = unfi.changelog.nodemap.get
1808 rev = unfi.changelog.nodemap.get
1809 public = phases.public
1809 public = phases.public
1810 draft = phases.draft
1810 draft = phases.draft
1811
1811
1812 # exclude changesets already public locally and update the others
1812 # exclude changesets already public locally and update the others
1813 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1813 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1814 if pheads:
1814 if pheads:
1815 tr = pullop.gettransaction()
1815 tr = pullop.gettransaction()
1816 phases.advanceboundary(pullop.repo, tr, public, pheads)
1816 phases.advanceboundary(pullop.repo, tr, public, pheads)
1817
1817
1818 # exclude changesets already draft locally and update the others
1818 # exclude changesets already draft locally and update the others
1819 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1819 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1820 if dheads:
1820 if dheads:
1821 tr = pullop.gettransaction()
1821 tr = pullop.gettransaction()
1822 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1822 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1823
1823
1824 def _pullbookmarks(pullop):
1824 def _pullbookmarks(pullop):
1825 """process the remote bookmark information to update the local one"""
1825 """process the remote bookmark information to update the local one"""
1826 if 'bookmarks' in pullop.stepsdone:
1826 if 'bookmarks' in pullop.stepsdone:
1827 return
1827 return
1828 pullop.stepsdone.add('bookmarks')
1828 pullop.stepsdone.add('bookmarks')
1829 repo = pullop.repo
1829 repo = pullop.repo
1830 remotebookmarks = pullop.remotebookmarks
1830 remotebookmarks = pullop.remotebookmarks
1831 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1831 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1832 pullop.remote.url(),
1832 pullop.remote.url(),
1833 pullop.gettransaction,
1833 pullop.gettransaction,
1834 explicit=pullop.explicitbookmarks)
1834 explicit=pullop.explicitbookmarks)
1835
1835
1836 def _pullobsolete(pullop):
1836 def _pullobsolete(pullop):
1837 """utility function to pull obsolete markers from a remote
1837 """utility function to pull obsolete markers from a remote
1838
1838
1839 The `gettransaction` is function that return the pull transaction, creating
1839 The `gettransaction` is function that return the pull transaction, creating
1840 one if necessary. We return the transaction to inform the calling code that
1840 one if necessary. We return the transaction to inform the calling code that
1841 a new transaction have been created (when applicable).
1841 a new transaction have been created (when applicable).
1842
1842
1843 Exists mostly to allow overriding for experimentation purpose"""
1843 Exists mostly to allow overriding for experimentation purpose"""
1844 if 'obsmarkers' in pullop.stepsdone:
1844 if 'obsmarkers' in pullop.stepsdone:
1845 return
1845 return
1846 pullop.stepsdone.add('obsmarkers')
1846 pullop.stepsdone.add('obsmarkers')
1847 tr = None
1847 tr = None
1848 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1848 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1849 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1849 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1850 remoteobs = listkeys(pullop.remote, 'obsolete')
1850 remoteobs = listkeys(pullop.remote, 'obsolete')
1851 if 'dump0' in remoteobs:
1851 if 'dump0' in remoteobs:
1852 tr = pullop.gettransaction()
1852 tr = pullop.gettransaction()
1853 markers = []
1853 markers = []
1854 for key in sorted(remoteobs, reverse=True):
1854 for key in sorted(remoteobs, reverse=True):
1855 if key.startswith('dump'):
1855 if key.startswith('dump'):
1856 data = util.b85decode(remoteobs[key])
1856 data = util.b85decode(remoteobs[key])
1857 version, newmarks = obsolete._readmarkers(data)
1857 version, newmarks = obsolete._readmarkers(data)
1858 markers += newmarks
1858 markers += newmarks
1859 if markers:
1859 if markers:
1860 pullop.repo.obsstore.add(tr, markers)
1860 pullop.repo.obsstore.add(tr, markers)
1861 pullop.repo.invalidatevolatilesets()
1861 pullop.repo.invalidatevolatilesets()
1862 return tr
1862 return tr
1863
1863
1864 def applynarrowacl(repo, kwargs):
1864 def applynarrowacl(repo, kwargs):
1865 """Apply narrow fetch access control.
1865 """Apply narrow fetch access control.
1866
1866
1867 This massages the named arguments for getbundle wire protocol commands
1867 This massages the named arguments for getbundle wire protocol commands
1868 so requested data is filtered through access control rules.
1868 so requested data is filtered through access control rules.
1869 """
1869 """
1870 ui = repo.ui
1870 ui = repo.ui
1871 # TODO this assumes existence of HTTP and is a layering violation.
1871 # TODO this assumes existence of HTTP and is a layering violation.
1872 username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
1872 username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
1873 user_includes = ui.configlist(
1873 user_includes = ui.configlist(
1874 _NARROWACL_SECTION, username + '.includes',
1874 _NARROWACL_SECTION, username + '.includes',
1875 ui.configlist(_NARROWACL_SECTION, 'default.includes'))
1875 ui.configlist(_NARROWACL_SECTION, 'default.includes'))
1876 user_excludes = ui.configlist(
1876 user_excludes = ui.configlist(
1877 _NARROWACL_SECTION, username + '.excludes',
1877 _NARROWACL_SECTION, username + '.excludes',
1878 ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
1878 ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
1879 if not user_includes:
1879 if not user_includes:
1880 raise error.Abort(_("{} configuration for user {} is empty")
1880 raise error.Abort(_("{} configuration for user {} is empty")
1881 .format(_NARROWACL_SECTION, username))
1881 .format(_NARROWACL_SECTION, username))
1882
1882
1883 user_includes = [
1883 user_includes = [
1884 'path:.' if p == '*' else 'path:' + p for p in user_includes]
1884 'path:.' if p == '*' else 'path:' + p for p in user_includes]
1885 user_excludes = [
1885 user_excludes = [
1886 'path:.' if p == '*' else 'path:' + p for p in user_excludes]
1886 'path:.' if p == '*' else 'path:' + p for p in user_excludes]
1887
1887
1888 req_includes = set(kwargs.get(r'includepats', []))
1888 req_includes = set(kwargs.get(r'includepats', []))
1889 req_excludes = set(kwargs.get(r'excludepats', []))
1889 req_excludes = set(kwargs.get(r'excludepats', []))
1890
1890
1891 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
1891 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
1892 req_includes, req_excludes, user_includes, user_excludes)
1892 req_includes, req_excludes, user_includes, user_excludes)
1893
1893
1894 if invalid_includes:
1894 if invalid_includes:
1895 raise error.Abort(
1895 raise error.Abort(
1896 _("The following includes are not accessible for {}: {}")
1896 _("The following includes are not accessible for {}: {}")
1897 .format(username, invalid_includes))
1897 .format(username, invalid_includes))
1898
1898
1899 new_args = {}
1899 new_args = {}
1900 new_args.update(kwargs)
1900 new_args.update(kwargs)
1901 new_args[r'narrow'] = True
1901 new_args[r'narrow'] = True
1902 new_args[r'includepats'] = req_includes
1902 new_args[r'includepats'] = req_includes
1903 if req_excludes:
1903 if req_excludes:
1904 new_args[r'excludepats'] = req_excludes
1904 new_args[r'excludepats'] = req_excludes
1905
1905
1906 return new_args
1906 return new_args
1907
1907
1908 def _computeellipsis(repo, common, heads, known, match, depth=None):
1908 def _computeellipsis(repo, common, heads, known, match, depth=None):
1909 """Compute the shape of a narrowed DAG.
1909 """Compute the shape of a narrowed DAG.
1910
1910
1911 Args:
1911 Args:
1912 repo: The repository we're transferring.
1912 repo: The repository we're transferring.
1913 common: The roots of the DAG range we're transferring.
1913 common: The roots of the DAG range we're transferring.
1914 May be just [nullid], which means all ancestors of heads.
1914 May be just [nullid], which means all ancestors of heads.
1915 heads: The heads of the DAG range we're transferring.
1915 heads: The heads of the DAG range we're transferring.
1916 match: The narrowmatcher that allows us to identify relevant changes.
1916 match: The narrowmatcher that allows us to identify relevant changes.
1917 depth: If not None, only consider nodes to be full nodes if they are at
1917 depth: If not None, only consider nodes to be full nodes if they are at
1918 most depth changesets away from one of heads.
1918 most depth changesets away from one of heads.
1919
1919
1920 Returns:
1920 Returns:
1921 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
1921 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
1922
1922
1923 visitnodes: The list of nodes (either full or ellipsis) which
1923 visitnodes: The list of nodes (either full or ellipsis) which
1924 need to be sent to the client.
1924 need to be sent to the client.
1925 relevant_nodes: The set of changelog nodes which change a file inside
1925 relevant_nodes: The set of changelog nodes which change a file inside
1926 the narrowspec. The client needs these as non-ellipsis nodes.
1926 the narrowspec. The client needs these as non-ellipsis nodes.
1927 ellipsisroots: A dict of {rev: parents} that is used in
1927 ellipsisroots: A dict of {rev: parents} that is used in
1928 narrowchangegroup to produce ellipsis nodes with the
1928 narrowchangegroup to produce ellipsis nodes with the
1929 correct parents.
1929 correct parents.
1930 """
1930 """
1931 cl = repo.changelog
1931 cl = repo.changelog
1932 mfl = repo.manifestlog
1932 mfl = repo.manifestlog
1933
1933
1934 clrev = cl.rev
1934 clrev = cl.rev
1935
1935
1936 commonrevs = {clrev(n) for n in common} | {nullrev}
1936 commonrevs = {clrev(n) for n in common} | {nullrev}
1937 headsrevs = {clrev(n) for n in heads}
1937 headsrevs = {clrev(n) for n in heads}
1938
1938
1939 if depth:
1939 if depth:
1940 revdepth = {h: 0 for h in headsrevs}
1940 revdepth = {h: 0 for h in headsrevs}
1941
1941
1942 ellipsisheads = collections.defaultdict(set)
1942 ellipsisheads = collections.defaultdict(set)
1943 ellipsisroots = collections.defaultdict(set)
1943 ellipsisroots = collections.defaultdict(set)
1944
1944
1945 def addroot(head, curchange):
1945 def addroot(head, curchange):
1946 """Add a root to an ellipsis head, splitting heads with 3 roots."""
1946 """Add a root to an ellipsis head, splitting heads with 3 roots."""
1947 ellipsisroots[head].add(curchange)
1947 ellipsisroots[head].add(curchange)
1948 # Recursively split ellipsis heads with 3 roots by finding the
1948 # Recursively split ellipsis heads with 3 roots by finding the
1949 # roots' youngest common descendant which is an elided merge commit.
1949 # roots' youngest common descendant which is an elided merge commit.
1950 # That descendant takes 2 of the 3 roots as its own, and becomes a
1950 # That descendant takes 2 of the 3 roots as its own, and becomes a
1951 # root of the head.
1951 # root of the head.
1952 while len(ellipsisroots[head]) > 2:
1952 while len(ellipsisroots[head]) > 2:
1953 child, roots = splithead(head)
1953 child, roots = splithead(head)
1954 splitroots(head, child, roots)
1954 splitroots(head, child, roots)
1955 head = child # Recurse in case we just added a 3rd root
1955 head = child # Recurse in case we just added a 3rd root
1956
1956
1957 def splitroots(head, child, roots):
1957 def splitroots(head, child, roots):
1958 ellipsisroots[head].difference_update(roots)
1958 ellipsisroots[head].difference_update(roots)
1959 ellipsisroots[head].add(child)
1959 ellipsisroots[head].add(child)
1960 ellipsisroots[child].update(roots)
1960 ellipsisroots[child].update(roots)
1961 ellipsisroots[child].discard(child)
1961 ellipsisroots[child].discard(child)
1962
1962
1963 def splithead(head):
1963 def splithead(head):
1964 r1, r2, r3 = sorted(ellipsisroots[head])
1964 r1, r2, r3 = sorted(ellipsisroots[head])
1965 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
1965 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
1966 mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
1966 mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
1967 nr1, head, nr2, head)
1967 nr1, head, nr2, head)
1968 for j in mid:
1968 for j in mid:
1969 if j == nr2:
1969 if j == nr2:
1970 return nr2, (nr1, nr2)
1970 return nr2, (nr1, nr2)
1971 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
1971 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
1972 return j, (nr1, nr2)
1972 return j, (nr1, nr2)
1973 raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
1973 raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
1974 'roots: %d %d %d') % (head, r1, r2, r3))
1974 'roots: %d %d %d') % (head, r1, r2, r3))
1975
1975
1976 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
1976 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
1977 visit = reversed(missing)
1977 visit = reversed(missing)
1978 relevant_nodes = set()
1978 relevant_nodes = set()
1979 visitnodes = [cl.node(m) for m in missing]
1979 visitnodes = [cl.node(m) for m in missing]
1980 required = set(headsrevs) | known
1980 required = set(headsrevs) | known
1981 for rev in visit:
1981 for rev in visit:
1982 clrev = cl.changelogrevision(rev)
1982 clrev = cl.changelogrevision(rev)
1983 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
1983 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
1984 if depth is not None:
1984 if depth is not None:
1985 curdepth = revdepth[rev]
1985 curdepth = revdepth[rev]
1986 for p in ps:
1986 for p in ps:
1987 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
1987 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
1988 needed = False
1988 needed = False
1989 shallow_enough = depth is None or revdepth[rev] <= depth
1989 shallow_enough = depth is None or revdepth[rev] <= depth
1990 if shallow_enough:
1990 if shallow_enough:
1991 curmf = mfl[clrev.manifest].read()
1991 curmf = mfl[clrev.manifest].read()
1992 if ps:
1992 if ps:
1993 # We choose to not trust the changed files list in
1993 # We choose to not trust the changed files list in
1994 # changesets because it's not always correct. TODO: could
1994 # changesets because it's not always correct. TODO: could
1995 # we trust it for the non-merge case?
1995 # we trust it for the non-merge case?
1996 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
1996 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
1997 needed = bool(curmf.diff(p1mf, match))
1997 needed = bool(curmf.diff(p1mf, match))
1998 if not needed and len(ps) > 1:
1998 if not needed and len(ps) > 1:
1999 # For merge changes, the list of changed files is not
1999 # For merge changes, the list of changed files is not
2000 # helpful, since we need to emit the merge if a file
2000 # helpful, since we need to emit the merge if a file
2001 # in the narrow spec has changed on either side of the
2001 # in the narrow spec has changed on either side of the
2002 # merge. As a result, we do a manifest diff to check.
2002 # merge. As a result, we do a manifest diff to check.
2003 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2003 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2004 needed = bool(curmf.diff(p2mf, match))
2004 needed = bool(curmf.diff(p2mf, match))
2005 else:
2005 else:
2006 # For a root node, we need to include the node if any
2006 # For a root node, we need to include the node if any
2007 # files in the node match the narrowspec.
2007 # files in the node match the narrowspec.
2008 needed = any(curmf.walk(match))
2008 needed = any(curmf.walk(match))
2009
2009
2010 if needed:
2010 if needed:
2011 for head in ellipsisheads[rev]:
2011 for head in ellipsisheads[rev]:
2012 addroot(head, rev)
2012 addroot(head, rev)
2013 for p in ps:
2013 for p in ps:
2014 required.add(p)
2014 required.add(p)
2015 relevant_nodes.add(cl.node(rev))
2015 relevant_nodes.add(cl.node(rev))
2016 else:
2016 else:
2017 if not ps:
2017 if not ps:
2018 ps = [nullrev]
2018 ps = [nullrev]
2019 if rev in required:
2019 if rev in required:
2020 for head in ellipsisheads[rev]:
2020 for head in ellipsisheads[rev]:
2021 addroot(head, rev)
2021 addroot(head, rev)
2022 for p in ps:
2022 for p in ps:
2023 ellipsisheads[p].add(rev)
2023 ellipsisheads[p].add(rev)
2024 else:
2024 else:
2025 for p in ps:
2025 for p in ps:
2026 ellipsisheads[p] |= ellipsisheads[rev]
2026 ellipsisheads[p] |= ellipsisheads[rev]
2027
2027
2028 # add common changesets as roots of their reachable ellipsis heads
2028 # add common changesets as roots of their reachable ellipsis heads
2029 for c in commonrevs:
2029 for c in commonrevs:
2030 for head in ellipsisheads[c]:
2030 for head in ellipsisheads[c]:
2031 addroot(head, c)
2031 addroot(head, c)
2032 return visitnodes, relevant_nodes, ellipsisroots
2032 return visitnodes, relevant_nodes, ellipsisroots
2033
2033
2034 def caps20to10(repo, role):
2034 def caps20to10(repo, role):
2035 """return a set with appropriate options to use bundle20 during getbundle"""
2035 """return a set with appropriate options to use bundle20 during getbundle"""
2036 caps = {'HG20'}
2036 caps = {'HG20'}
2037 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2037 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2038 caps.add('bundle2=' + urlreq.quote(capsblob))
2038 caps.add('bundle2=' + urlreq.quote(capsblob))
2039 return caps
2039 return caps
2040
2040
2041 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2041 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2042 getbundle2partsorder = []
2042 getbundle2partsorder = []
2043
2043
2044 # Mapping between step name and function
2044 # Mapping between step name and function
2045 #
2045 #
2046 # This exists to help extensions wrap steps if necessary
2046 # This exists to help extensions wrap steps if necessary
2047 getbundle2partsmapping = {}
2047 getbundle2partsmapping = {}
2048
2048
2049 def getbundle2partsgenerator(stepname, idx=None):
2049 def getbundle2partsgenerator(stepname, idx=None):
2050 """decorator for function generating bundle2 part for getbundle
2050 """decorator for function generating bundle2 part for getbundle
2051
2051
2052 The function is added to the step -> function mapping and appended to the
2052 The function is added to the step -> function mapping and appended to the
2053 list of steps. Beware that decorated functions will be added in order
2053 list of steps. Beware that decorated functions will be added in order
2054 (this may matter).
2054 (this may matter).
2055
2055
2056 You can only use this decorator for new steps, if you want to wrap a step
2056 You can only use this decorator for new steps, if you want to wrap a step
2057 from an extension, attack the getbundle2partsmapping dictionary directly."""
2057 from an extension, attack the getbundle2partsmapping dictionary directly."""
2058 def dec(func):
2058 def dec(func):
2059 assert stepname not in getbundle2partsmapping
2059 assert stepname not in getbundle2partsmapping
2060 getbundle2partsmapping[stepname] = func
2060 getbundle2partsmapping[stepname] = func
2061 if idx is None:
2061 if idx is None:
2062 getbundle2partsorder.append(stepname)
2062 getbundle2partsorder.append(stepname)
2063 else:
2063 else:
2064 getbundle2partsorder.insert(idx, stepname)
2064 getbundle2partsorder.insert(idx, stepname)
2065 return func
2065 return func
2066 return dec
2066 return dec
2067
2067
2068 def bundle2requested(bundlecaps):
2068 def bundle2requested(bundlecaps):
2069 if bundlecaps is not None:
2069 if bundlecaps is not None:
2070 return any(cap.startswith('HG2') for cap in bundlecaps)
2070 return any(cap.startswith('HG2') for cap in bundlecaps)
2071 return False
2071 return False
2072
2072
2073 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
2073 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
2074 **kwargs):
2074 **kwargs):
2075 """Return chunks constituting a bundle's raw data.
2075 """Return chunks constituting a bundle's raw data.
2076
2076
2077 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2077 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2078 passed.
2078 passed.
2079
2079
2080 Returns a 2-tuple of a dict with metadata about the generated bundle
2080 Returns a 2-tuple of a dict with metadata about the generated bundle
2081 and an iterator over raw chunks (of varying sizes).
2081 and an iterator over raw chunks (of varying sizes).
2082 """
2082 """
2083 kwargs = pycompat.byteskwargs(kwargs)
2083 kwargs = pycompat.byteskwargs(kwargs)
2084 info = {}
2084 info = {}
2085 usebundle2 = bundle2requested(bundlecaps)
2085 usebundle2 = bundle2requested(bundlecaps)
2086 # bundle10 case
2086 # bundle10 case
2087 if not usebundle2:
2087 if not usebundle2:
2088 if bundlecaps and not kwargs.get('cg', True):
2088 if bundlecaps and not kwargs.get('cg', True):
2089 raise ValueError(_('request for bundle10 must include changegroup'))
2089 raise ValueError(_('request for bundle10 must include changegroup'))
2090
2090
2091 if kwargs:
2091 if kwargs:
2092 raise ValueError(_('unsupported getbundle arguments: %s')
2092 raise ValueError(_('unsupported getbundle arguments: %s')
2093 % ', '.join(sorted(kwargs.keys())))
2093 % ', '.join(sorted(kwargs.keys())))
2094 outgoing = _computeoutgoing(repo, heads, common)
2094 outgoing = _computeoutgoing(repo, heads, common)
2095 info['bundleversion'] = 1
2095 info['bundleversion'] = 1
2096 return info, changegroup.makestream(repo, outgoing, '01', source,
2096 return info, changegroup.makestream(repo, outgoing, '01', source,
2097 bundlecaps=bundlecaps)
2097 bundlecaps=bundlecaps)
2098
2098
2099 # bundle20 case
2099 # bundle20 case
2100 info['bundleversion'] = 2
2100 info['bundleversion'] = 2
2101 b2caps = {}
2101 b2caps = {}
2102 for bcaps in bundlecaps:
2102 for bcaps in bundlecaps:
2103 if bcaps.startswith('bundle2='):
2103 if bcaps.startswith('bundle2='):
2104 blob = urlreq.unquote(bcaps[len('bundle2='):])
2104 blob = urlreq.unquote(bcaps[len('bundle2='):])
2105 b2caps.update(bundle2.decodecaps(blob))
2105 b2caps.update(bundle2.decodecaps(blob))
2106 bundler = bundle2.bundle20(repo.ui, b2caps)
2106 bundler = bundle2.bundle20(repo.ui, b2caps)
2107
2107
2108 kwargs['heads'] = heads
2108 kwargs['heads'] = heads
2109 kwargs['common'] = common
2109 kwargs['common'] = common
2110
2110
2111 for name in getbundle2partsorder:
2111 for name in getbundle2partsorder:
2112 func = getbundle2partsmapping[name]
2112 func = getbundle2partsmapping[name]
2113 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
2113 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
2114 **pycompat.strkwargs(kwargs))
2114 **pycompat.strkwargs(kwargs))
2115
2115
2116 info['prefercompressed'] = bundler.prefercompressed
2116 info['prefercompressed'] = bundler.prefercompressed
2117
2117
2118 return info, bundler.getchunks()
2118 return info, bundler.getchunks()
2119
2119
2120 @getbundle2partsgenerator('stream2')
2120 @getbundle2partsgenerator('stream2')
2121 def _getbundlestream2(bundler, repo, *args, **kwargs):
2121 def _getbundlestream2(bundler, repo, *args, **kwargs):
2122 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2122 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2123
2123
2124 @getbundle2partsgenerator('changegroup')
2124 @getbundle2partsgenerator('changegroup')
2125 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
2125 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
2126 b2caps=None, heads=None, common=None, **kwargs):
2126 b2caps=None, heads=None, common=None, **kwargs):
2127 """add a changegroup part to the requested bundle"""
2127 """add a changegroup part to the requested bundle"""
2128 if not kwargs.get(r'cg', True):
2128 if not kwargs.get(r'cg', True):
2129 return
2129 return
2130
2130
2131 version = '01'
2131 version = '01'
2132 cgversions = b2caps.get('changegroup')
2132 cgversions = b2caps.get('changegroup')
2133 if cgversions: # 3.1 and 3.2 ship with an empty value
2133 if cgversions: # 3.1 and 3.2 ship with an empty value
2134 cgversions = [v for v in cgversions
2134 cgversions = [v for v in cgversions
2135 if v in changegroup.supportedoutgoingversions(repo)]
2135 if v in changegroup.supportedoutgoingversions(repo)]
2136 if not cgversions:
2136 if not cgversions:
2137 raise ValueError(_('no common changegroup version'))
2137 raise ValueError(_('no common changegroup version'))
2138 version = max(cgversions)
2138 version = max(cgversions)
2139
2139
2140 outgoing = _computeoutgoing(repo, heads, common)
2140 outgoing = _computeoutgoing(repo, heads, common)
2141 if not outgoing.missing:
2141 if not outgoing.missing:
2142 return
2142 return
2143
2143
2144 if kwargs.get(r'narrow', False):
2144 if kwargs.get(r'narrow', False):
2145 include = sorted(filter(bool, kwargs.get(r'includepats', [])))
2145 include = sorted(filter(bool, kwargs.get(r'includepats', [])))
2146 exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
2146 exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
2147 filematcher = narrowspec.match(repo.root, include=include,
2147 filematcher = narrowspec.match(repo.root, include=include,
2148 exclude=exclude)
2148 exclude=exclude)
2149 else:
2149 else:
2150 filematcher = None
2150 filematcher = None
2151
2151
2152 cgstream = changegroup.makestream(repo, outgoing, version, source,
2152 cgstream = changegroup.makestream(repo, outgoing, version, source,
2153 bundlecaps=bundlecaps,
2153 bundlecaps=bundlecaps,
2154 filematcher=filematcher)
2154 filematcher=filematcher)
2155
2155
2156 part = bundler.newpart('changegroup', data=cgstream)
2156 part = bundler.newpart('changegroup', data=cgstream)
2157 if cgversions:
2157 if cgversions:
2158 part.addparam('version', version)
2158 part.addparam('version', version)
2159
2159
2160 part.addparam('nbchanges', '%d' % len(outgoing.missing),
2160 part.addparam('nbchanges', '%d' % len(outgoing.missing),
2161 mandatory=False)
2161 mandatory=False)
2162
2162
2163 if 'treemanifest' in repo.requirements:
2163 if 'treemanifest' in repo.requirements:
2164 part.addparam('treemanifest', '1')
2164 part.addparam('treemanifest', '1')
2165
2165
2166 if kwargs.get(r'narrow', False) and (include or exclude):
2166 if kwargs.get(r'narrow', False) and (include or exclude):
2167 narrowspecpart = bundler.newpart('narrow:spec')
2167 narrowspecpart = bundler.newpart('narrow:spec')
2168 if include:
2168 if include:
2169 narrowspecpart.addparam(
2169 narrowspecpart.addparam(
2170 'include', '\n'.join(include), mandatory=True)
2170 'include', '\n'.join(include), mandatory=True)
2171 if exclude:
2171 if exclude:
2172 narrowspecpart.addparam(
2172 narrowspecpart.addparam(
2173 'exclude', '\n'.join(exclude), mandatory=True)
2173 'exclude', '\n'.join(exclude), mandatory=True)
2174
2174
2175 @getbundle2partsgenerator('bookmarks')
2175 @getbundle2partsgenerator('bookmarks')
2176 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
2176 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
2177 b2caps=None, **kwargs):
2177 b2caps=None, **kwargs):
2178 """add a bookmark part to the requested bundle"""
2178 """add a bookmark part to the requested bundle"""
2179 if not kwargs.get(r'bookmarks', False):
2179 if not kwargs.get(r'bookmarks', False):
2180 return
2180 return
2181 if 'bookmarks' not in b2caps:
2181 if 'bookmarks' not in b2caps:
2182 raise ValueError(_('no common bookmarks exchange method'))
2182 raise ValueError(_('no common bookmarks exchange method'))
2183 books = bookmod.listbinbookmarks(repo)
2183 books = bookmod.listbinbookmarks(repo)
2184 data = bookmod.binaryencode(books)
2184 data = bookmod.binaryencode(books)
2185 if data:
2185 if data:
2186 bundler.newpart('bookmarks', data=data)
2186 bundler.newpart('bookmarks', data=data)
2187
2187
2188 @getbundle2partsgenerator('listkeys')
2188 @getbundle2partsgenerator('listkeys')
2189 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
2189 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
2190 b2caps=None, **kwargs):
2190 b2caps=None, **kwargs):
2191 """add parts containing listkeys namespaces to the requested bundle"""
2191 """add parts containing listkeys namespaces to the requested bundle"""
2192 listkeys = kwargs.get(r'listkeys', ())
2192 listkeys = kwargs.get(r'listkeys', ())
2193 for namespace in listkeys:
2193 for namespace in listkeys:
2194 part = bundler.newpart('listkeys')
2194 part = bundler.newpart('listkeys')
2195 part.addparam('namespace', namespace)
2195 part.addparam('namespace', namespace)
2196 keys = repo.listkeys(namespace).items()
2196 keys = repo.listkeys(namespace).items()
2197 part.data = pushkey.encodekeys(keys)
2197 part.data = pushkey.encodekeys(keys)
2198
2198
2199 @getbundle2partsgenerator('obsmarkers')
2199 @getbundle2partsgenerator('obsmarkers')
2200 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
2200 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
2201 b2caps=None, heads=None, **kwargs):
2201 b2caps=None, heads=None, **kwargs):
2202 """add an obsolescence markers part to the requested bundle"""
2202 """add an obsolescence markers part to the requested bundle"""
2203 if kwargs.get(r'obsmarkers', False):
2203 if kwargs.get(r'obsmarkers', False):
2204 if heads is None:
2204 if heads is None:
2205 heads = repo.heads()
2205 heads = repo.heads()
2206 subset = [c.node() for c in repo.set('::%ln', heads)]
2206 subset = [c.node() for c in repo.set('::%ln', heads)]
2207 markers = repo.obsstore.relevantmarkers(subset)
2207 markers = repo.obsstore.relevantmarkers(subset)
2208 markers = sorted(markers)
2208 markers = sorted(markers)
2209 bundle2.buildobsmarkerspart(bundler, markers)
2209 bundle2.buildobsmarkerspart(bundler, markers)
2210
2210
2211 @getbundle2partsgenerator('phases')
2211 @getbundle2partsgenerator('phases')
2212 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
2212 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
2213 b2caps=None, heads=None, **kwargs):
2213 b2caps=None, heads=None, **kwargs):
2214 """add phase heads part to the requested bundle"""
2214 """add phase heads part to the requested bundle"""
2215 if kwargs.get(r'phases', False):
2215 if kwargs.get(r'phases', False):
2216 if not 'heads' in b2caps.get('phases'):
2216 if not 'heads' in b2caps.get('phases'):
2217 raise ValueError(_('no common phases exchange method'))
2217 raise ValueError(_('no common phases exchange method'))
2218 if heads is None:
2218 if heads is None:
2219 heads = repo.heads()
2219 heads = repo.heads()
2220
2220
2221 headsbyphase = collections.defaultdict(set)
2221 headsbyphase = collections.defaultdict(set)
2222 if repo.publishing():
2222 if repo.publishing():
2223 headsbyphase[phases.public] = heads
2223 headsbyphase[phases.public] = heads
2224 else:
2224 else:
2225 # find the appropriate heads to move
2225 # find the appropriate heads to move
2226
2226
2227 phase = repo._phasecache.phase
2227 phase = repo._phasecache.phase
2228 node = repo.changelog.node
2228 node = repo.changelog.node
2229 rev = repo.changelog.rev
2229 rev = repo.changelog.rev
2230 for h in heads:
2230 for h in heads:
2231 headsbyphase[phase(repo, rev(h))].add(h)
2231 headsbyphase[phase(repo, rev(h))].add(h)
2232 seenphases = list(headsbyphase.keys())
2232 seenphases = list(headsbyphase.keys())
2233
2233
2234 # We do not handle anything but public and draft phase for now)
2234 # We do not handle anything but public and draft phase for now)
2235 if seenphases:
2235 if seenphases:
2236 assert max(seenphases) <= phases.draft
2236 assert max(seenphases) <= phases.draft
2237
2237
2238 # if client is pulling non-public changesets, we need to find
2238 # if client is pulling non-public changesets, we need to find
2239 # intermediate public heads.
2239 # intermediate public heads.
2240 draftheads = headsbyphase.get(phases.draft, set())
2240 draftheads = headsbyphase.get(phases.draft, set())
2241 if draftheads:
2241 if draftheads:
2242 publicheads = headsbyphase.get(phases.public, set())
2242 publicheads = headsbyphase.get(phases.public, set())
2243
2243
2244 revset = 'heads(only(%ln, %ln) and public())'
2244 revset = 'heads(only(%ln, %ln) and public())'
2245 extraheads = repo.revs(revset, draftheads, publicheads)
2245 extraheads = repo.revs(revset, draftheads, publicheads)
2246 for r in extraheads:
2246 for r in extraheads:
2247 headsbyphase[phases.public].add(node(r))
2247 headsbyphase[phases.public].add(node(r))
2248
2248
2249 # transform data in a format used by the encoding function
2249 # transform data in a format used by the encoding function
2250 phasemapping = []
2250 phasemapping = []
2251 for phase in phases.allphases:
2251 for phase in phases.allphases:
2252 phasemapping.append(sorted(headsbyphase[phase]))
2252 phasemapping.append(sorted(headsbyphase[phase]))
2253
2253
2254 # generate the actual part
2254 # generate the actual part
2255 phasedata = phases.binaryencode(phasemapping)
2255 phasedata = phases.binaryencode(phasemapping)
2256 bundler.newpart('phase-heads', data=phasedata)
2256 bundler.newpart('phase-heads', data=phasedata)
2257
2257
2258 @getbundle2partsgenerator('hgtagsfnodes')
2258 @getbundle2partsgenerator('hgtagsfnodes')
2259 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
2259 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
2260 b2caps=None, heads=None, common=None,
2260 b2caps=None, heads=None, common=None,
2261 **kwargs):
2261 **kwargs):
2262 """Transfer the .hgtags filenodes mapping.
2262 """Transfer the .hgtags filenodes mapping.
2263
2263
2264 Only values for heads in this bundle will be transferred.
2264 Only values for heads in this bundle will be transferred.
2265
2265
2266 The part data consists of pairs of 20 byte changeset node and .hgtags
2266 The part data consists of pairs of 20 byte changeset node and .hgtags
2267 filenodes raw values.
2267 filenodes raw values.
2268 """
2268 """
2269 # Don't send unless:
2269 # Don't send unless:
2270 # - changeset are being exchanged,
2270 # - changeset are being exchanged,
2271 # - the client supports it.
2271 # - the client supports it.
2272 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
2272 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
2273 return
2273 return
2274
2274
2275 outgoing = _computeoutgoing(repo, heads, common)
2275 outgoing = _computeoutgoing(repo, heads, common)
2276 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2276 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2277
2277
2278 @getbundle2partsgenerator('cache:rev-branch-cache')
2278 @getbundle2partsgenerator('cache:rev-branch-cache')
2279 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
2279 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
2280 b2caps=None, heads=None, common=None,
2280 b2caps=None, heads=None, common=None,
2281 **kwargs):
2281 **kwargs):
2282 """Transfer the rev-branch-cache mapping
2282 """Transfer the rev-branch-cache mapping
2283
2283
2284 The payload is a series of data related to each branch
2284 The payload is a series of data related to each branch
2285
2285
2286 1) branch name length
2286 1) branch name length
2287 2) number of open heads
2287 2) number of open heads
2288 3) number of closed heads
2288 3) number of closed heads
2289 4) open heads nodes
2289 4) open heads nodes
2290 5) closed heads nodes
2290 5) closed heads nodes
2291 """
2291 """
2292 # Don't send unless:
2292 # Don't send unless:
2293 # - changeset are being exchanged,
2293 # - changeset are being exchanged,
2294 # - the client supports it.
2294 # - the client supports it.
2295 # - narrow bundle isn't in play (not currently compatible).
2295 # - narrow bundle isn't in play (not currently compatible).
2296 if (not kwargs.get(r'cg', True)
2296 if (not kwargs.get(r'cg', True)
2297 or 'rev-branch-cache' not in b2caps
2297 or 'rev-branch-cache' not in b2caps
2298 or kwargs.get(r'narrow', False)
2298 or kwargs.get(r'narrow', False)
2299 or repo.ui.has_section(_NARROWACL_SECTION)):
2299 or repo.ui.has_section(_NARROWACL_SECTION)):
2300 return
2300 return
2301
2301
2302 outgoing = _computeoutgoing(repo, heads, common)
2302 outgoing = _computeoutgoing(repo, heads, common)
2303 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2303 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2304
2304
2305 def check_heads(repo, their_heads, context):
2305 def check_heads(repo, their_heads, context):
2306 """check if the heads of a repo have been modified
2306 """check if the heads of a repo have been modified
2307
2307
2308 Used by peer for unbundling.
2308 Used by peer for unbundling.
2309 """
2309 """
2310 heads = repo.heads()
2310 heads = repo.heads()
2311 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
2311 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
2312 if not (their_heads == ['force'] or their_heads == heads or
2312 if not (their_heads == ['force'] or their_heads == heads or
2313 their_heads == ['hashed', heads_hash]):
2313 their_heads == ['hashed', heads_hash]):
2314 # someone else committed/pushed/unbundled while we
2314 # someone else committed/pushed/unbundled while we
2315 # were transferring data
2315 # were transferring data
2316 raise error.PushRaced('repository changed while %s - '
2316 raise error.PushRaced('repository changed while %s - '
2317 'please try again' % context)
2317 'please try again' % context)
2318
2318
2319 def unbundle(repo, cg, heads, source, url):
2319 def unbundle(repo, cg, heads, source, url):
2320 """Apply a bundle to a repo.
2320 """Apply a bundle to a repo.
2321
2321
2322 this function makes sure the repo is locked during the application and have
2322 this function makes sure the repo is locked during the application and have
2323 mechanism to check that no push race occurred between the creation of the
2323 mechanism to check that no push race occurred between the creation of the
2324 bundle and its application.
2324 bundle and its application.
2325
2325
2326 If the push was raced as PushRaced exception is raised."""
2326 If the push was raced as PushRaced exception is raised."""
2327 r = 0
2327 r = 0
2328 # need a transaction when processing a bundle2 stream
2328 # need a transaction when processing a bundle2 stream
2329 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2329 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2330 lockandtr = [None, None, None]
2330 lockandtr = [None, None, None]
2331 recordout = None
2331 recordout = None
2332 # quick fix for output mismatch with bundle2 in 3.4
2332 # quick fix for output mismatch with bundle2 in 3.4
2333 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
2333 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
2334 if url.startswith('remote:http:') or url.startswith('remote:https:'):
2334 if url.startswith('remote:http:') or url.startswith('remote:https:'):
2335 captureoutput = True
2335 captureoutput = True
2336 try:
2336 try:
2337 # note: outside bundle1, 'heads' is expected to be empty and this
2337 # note: outside bundle1, 'heads' is expected to be empty and this
2338 # 'check_heads' call wil be a no-op
2338 # 'check_heads' call wil be a no-op
2339 check_heads(repo, heads, 'uploading changes')
2339 check_heads(repo, heads, 'uploading changes')
2340 # push can proceed
2340 # push can proceed
2341 if not isinstance(cg, bundle2.unbundle20):
2341 if not isinstance(cg, bundle2.unbundle20):
2342 # legacy case: bundle1 (changegroup 01)
2342 # legacy case: bundle1 (changegroup 01)
2343 txnname = "\n".join([source, util.hidepassword(url)])
2343 txnname = "\n".join([source, util.hidepassword(url)])
2344 with repo.lock(), repo.transaction(txnname) as tr:
2344 with repo.lock(), repo.transaction(txnname) as tr:
2345 op = bundle2.applybundle(repo, cg, tr, source, url)
2345 op = bundle2.applybundle(repo, cg, tr, source, url)
2346 r = bundle2.combinechangegroupresults(op)
2346 r = bundle2.combinechangegroupresults(op)
2347 else:
2347 else:
2348 r = None
2348 r = None
2349 try:
2349 try:
2350 def gettransaction():
2350 def gettransaction():
2351 if not lockandtr[2]:
2351 if not lockandtr[2]:
2352 lockandtr[0] = repo.wlock()
2352 lockandtr[0] = repo.wlock()
2353 lockandtr[1] = repo.lock()
2353 lockandtr[1] = repo.lock()
2354 lockandtr[2] = repo.transaction(source)
2354 lockandtr[2] = repo.transaction(source)
2355 lockandtr[2].hookargs['source'] = source
2355 lockandtr[2].hookargs['source'] = source
2356 lockandtr[2].hookargs['url'] = url
2356 lockandtr[2].hookargs['url'] = url
2357 lockandtr[2].hookargs['bundle2'] = '1'
2357 lockandtr[2].hookargs['bundle2'] = '1'
2358 return lockandtr[2]
2358 return lockandtr[2]
2359
2359
2360 # Do greedy locking by default until we're satisfied with lazy
2360 # Do greedy locking by default until we're satisfied with lazy
2361 # locking.
2361 # locking.
2362 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
2362 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
2363 gettransaction()
2363 gettransaction()
2364
2364
2365 op = bundle2.bundleoperation(repo, gettransaction,
2365 op = bundle2.bundleoperation(repo, gettransaction,
2366 captureoutput=captureoutput,
2366 captureoutput=captureoutput,
2367 source='push')
2367 source='push')
2368 try:
2368 try:
2369 op = bundle2.processbundle(repo, cg, op=op)
2369 op = bundle2.processbundle(repo, cg, op=op)
2370 finally:
2370 finally:
2371 r = op.reply
2371 r = op.reply
2372 if captureoutput and r is not None:
2372 if captureoutput and r is not None:
2373 repo.ui.pushbuffer(error=True, subproc=True)
2373 repo.ui.pushbuffer(error=True, subproc=True)
2374 def recordout(output):
2374 def recordout(output):
2375 r.newpart('output', data=output, mandatory=False)
2375 r.newpart('output', data=output, mandatory=False)
2376 if lockandtr[2] is not None:
2376 if lockandtr[2] is not None:
2377 lockandtr[2].close()
2377 lockandtr[2].close()
2378 except BaseException as exc:
2378 except BaseException as exc:
2379 exc.duringunbundle2 = True
2379 exc.duringunbundle2 = True
2380 if captureoutput and r is not None:
2380 if captureoutput and r is not None:
2381 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2381 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2382 def recordout(output):
2382 def recordout(output):
2383 part = bundle2.bundlepart('output', data=output,
2383 part = bundle2.bundlepart('output', data=output,
2384 mandatory=False)
2384 mandatory=False)
2385 parts.append(part)
2385 parts.append(part)
2386 raise
2386 raise
2387 finally:
2387 finally:
2388 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2388 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2389 if recordout is not None:
2389 if recordout is not None:
2390 recordout(repo.ui.popbuffer())
2390 recordout(repo.ui.popbuffer())
2391 return r
2391 return r
2392
2392
2393 def _maybeapplyclonebundle(pullop):
2393 def _maybeapplyclonebundle(pullop):
2394 """Apply a clone bundle from a remote, if possible."""
2394 """Apply a clone bundle from a remote, if possible."""
2395
2395
2396 repo = pullop.repo
2396 repo = pullop.repo
2397 remote = pullop.remote
2397 remote = pullop.remote
2398
2398
2399 if not repo.ui.configbool('ui', 'clonebundles'):
2399 if not repo.ui.configbool('ui', 'clonebundles'):
2400 return
2400 return
2401
2401
2402 # Only run if local repo is empty.
2402 # Only run if local repo is empty.
2403 if len(repo):
2403 if len(repo):
2404 return
2404 return
2405
2405
2406 if pullop.heads:
2406 if pullop.heads:
2407 return
2407 return
2408
2408
2409 if not remote.capable('clonebundles'):
2409 if not remote.capable('clonebundles'):
2410 return
2410 return
2411
2411
2412 with remote.commandexecutor() as e:
2412 with remote.commandexecutor() as e:
2413 res = e.callcommand('clonebundles', {}).result()
2413 res = e.callcommand('clonebundles', {}).result()
2414
2414
2415 # If we call the wire protocol command, that's good enough to record the
2415 # If we call the wire protocol command, that's good enough to record the
2416 # attempt.
2416 # attempt.
2417 pullop.clonebundleattempted = True
2417 pullop.clonebundleattempted = True
2418
2418
2419 entries = parseclonebundlesmanifest(repo, res)
2419 entries = parseclonebundlesmanifest(repo, res)
2420 if not entries:
2420 if not entries:
2421 repo.ui.note(_('no clone bundles available on remote; '
2421 repo.ui.note(_('no clone bundles available on remote; '
2422 'falling back to regular clone\n'))
2422 'falling back to regular clone\n'))
2423 return
2423 return
2424
2424
2425 entries = filterclonebundleentries(
2425 entries = filterclonebundleentries(
2426 repo, entries, streamclonerequested=pullop.streamclonerequested)
2426 repo, entries, streamclonerequested=pullop.streamclonerequested)
2427
2427
2428 if not entries:
2428 if not entries:
2429 # There is a thundering herd concern here. However, if a server
2429 # There is a thundering herd concern here. However, if a server
2430 # operator doesn't advertise bundles appropriate for its clients,
2430 # operator doesn't advertise bundles appropriate for its clients,
2431 # they deserve what's coming. Furthermore, from a client's
2431 # they deserve what's coming. Furthermore, from a client's
2432 # perspective, no automatic fallback would mean not being able to
2432 # perspective, no automatic fallback would mean not being able to
2433 # clone!
2433 # clone!
2434 repo.ui.warn(_('no compatible clone bundles available on server; '
2434 repo.ui.warn(_('no compatible clone bundles available on server; '
2435 'falling back to regular clone\n'))
2435 'falling back to regular clone\n'))
2436 repo.ui.warn(_('(you may want to report this to the server '
2436 repo.ui.warn(_('(you may want to report this to the server '
2437 'operator)\n'))
2437 'operator)\n'))
2438 return
2438 return
2439
2439
2440 entries = sortclonebundleentries(repo.ui, entries)
2440 entries = sortclonebundleentries(repo.ui, entries)
2441
2441
2442 url = entries[0]['URL']
2442 url = entries[0]['URL']
2443 repo.ui.status(_('applying clone bundle from %s\n') % url)
2443 repo.ui.status(_('applying clone bundle from %s\n') % url)
2444 if trypullbundlefromurl(repo.ui, repo, url):
2444 if trypullbundlefromurl(repo.ui, repo, url):
2445 repo.ui.status(_('finished applying clone bundle\n'))
2445 repo.ui.status(_('finished applying clone bundle\n'))
2446 # Bundle failed.
2446 # Bundle failed.
2447 #
2447 #
2448 # We abort by default to avoid the thundering herd of
2448 # We abort by default to avoid the thundering herd of
2449 # clients flooding a server that was expecting expensive
2449 # clients flooding a server that was expecting expensive
2450 # clone load to be offloaded.
2450 # clone load to be offloaded.
2451 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2451 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2452 repo.ui.warn(_('falling back to normal clone\n'))
2452 repo.ui.warn(_('falling back to normal clone\n'))
2453 else:
2453 else:
2454 raise error.Abort(_('error applying bundle'),
2454 raise error.Abort(_('error applying bundle'),
2455 hint=_('if this error persists, consider contacting '
2455 hint=_('if this error persists, consider contacting '
2456 'the server operator or disable clone '
2456 'the server operator or disable clone '
2457 'bundles via '
2457 'bundles via '
2458 '"--config ui.clonebundles=false"'))
2458 '"--config ui.clonebundles=false"'))
2459
2459
2460 def parseclonebundlesmanifest(repo, s):
2460 def parseclonebundlesmanifest(repo, s):
2461 """Parses the raw text of a clone bundles manifest.
2461 """Parses the raw text of a clone bundles manifest.
2462
2462
2463 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2463 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2464 to the URL and other keys are the attributes for the entry.
2464 to the URL and other keys are the attributes for the entry.
2465 """
2465 """
2466 m = []
2466 m = []
2467 for line in s.splitlines():
2467 for line in s.splitlines():
2468 fields = line.split()
2468 fields = line.split()
2469 if not fields:
2469 if not fields:
2470 continue
2470 continue
2471 attrs = {'URL': fields[0]}
2471 attrs = {'URL': fields[0]}
2472 for rawattr in fields[1:]:
2472 for rawattr in fields[1:]:
2473 key, value = rawattr.split('=', 1)
2473 key, value = rawattr.split('=', 1)
2474 key = urlreq.unquote(key)
2474 key = urlreq.unquote(key)
2475 value = urlreq.unquote(value)
2475 value = urlreq.unquote(value)
2476 attrs[key] = value
2476 attrs[key] = value
2477
2477
2478 # Parse BUNDLESPEC into components. This makes client-side
2478 # Parse BUNDLESPEC into components. This makes client-side
2479 # preferences easier to specify since you can prefer a single
2479 # preferences easier to specify since you can prefer a single
2480 # component of the BUNDLESPEC.
2480 # component of the BUNDLESPEC.
2481 if key == 'BUNDLESPEC':
2481 if key == 'BUNDLESPEC':
2482 try:
2482 try:
2483 bundlespec = parsebundlespec(repo, value)
2483 bundlespec = parsebundlespec(repo, value)
2484 attrs['COMPRESSION'] = bundlespec.compression
2484 attrs['COMPRESSION'] = bundlespec.compression
2485 attrs['VERSION'] = bundlespec.version
2485 attrs['VERSION'] = bundlespec.version
2486 except error.InvalidBundleSpecification:
2486 except error.InvalidBundleSpecification:
2487 pass
2487 pass
2488 except error.UnsupportedBundleSpecification:
2488 except error.UnsupportedBundleSpecification:
2489 pass
2489 pass
2490
2490
2491 m.append(attrs)
2491 m.append(attrs)
2492
2492
2493 return m
2493 return m
2494
2494
2495 def isstreamclonespec(bundlespec):
2495 def isstreamclonespec(bundlespec):
2496 # Stream clone v1
2496 # Stream clone v1
2497 if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):
2497 if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):
2498 return True
2498 return True
2499
2499
2500 # Stream clone v2
2500 # Stream clone v2
2501 if (bundlespec.wirecompression == 'UN' and \
2501 if (bundlespec.wirecompression == 'UN' and \
2502 bundlespec.wireversion == '02' and \
2502 bundlespec.wireversion == '02' and \
2503 bundlespec.contentopts.get('streamv2')):
2503 bundlespec.contentopts.get('streamv2')):
2504 return True
2504 return True
2505
2505
2506 return False
2506 return False
2507
2507
2508 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2508 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2509 """Remove incompatible clone bundle manifest entries.
2509 """Remove incompatible clone bundle manifest entries.
2510
2510
2511 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2511 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2512 and returns a new list consisting of only the entries that this client
2512 and returns a new list consisting of only the entries that this client
2513 should be able to apply.
2513 should be able to apply.
2514
2514
2515 There is no guarantee we'll be able to apply all returned entries because
2515 There is no guarantee we'll be able to apply all returned entries because
2516 the metadata we use to filter on may be missing or wrong.
2516 the metadata we use to filter on may be missing or wrong.
2517 """
2517 """
2518 newentries = []
2518 newentries = []
2519 for entry in entries:
2519 for entry in entries:
2520 spec = entry.get('BUNDLESPEC')
2520 spec = entry.get('BUNDLESPEC')
2521 if spec:
2521 if spec:
2522 try:
2522 try:
2523 bundlespec = parsebundlespec(repo, spec, strict=True)
2523 bundlespec = parsebundlespec(repo, spec, strict=True)
2524
2524
2525 # If a stream clone was requested, filter out non-streamclone
2525 # If a stream clone was requested, filter out non-streamclone
2526 # entries.
2526 # entries.
2527 if streamclonerequested and not isstreamclonespec(bundlespec):
2527 if streamclonerequested and not isstreamclonespec(bundlespec):
2528 repo.ui.debug('filtering %s because not a stream clone\n' %
2528 repo.ui.debug('filtering %s because not a stream clone\n' %
2529 entry['URL'])
2529 entry['URL'])
2530 continue
2530 continue
2531
2531
2532 except error.InvalidBundleSpecification as e:
2532 except error.InvalidBundleSpecification as e:
2533 repo.ui.debug(stringutil.forcebytestr(e) + '\n')
2533 repo.ui.debug(stringutil.forcebytestr(e) + '\n')
2534 continue
2534 continue
2535 except error.UnsupportedBundleSpecification as e:
2535 except error.UnsupportedBundleSpecification as e:
2536 repo.ui.debug('filtering %s because unsupported bundle '
2536 repo.ui.debug('filtering %s because unsupported bundle '
2537 'spec: %s\n' % (
2537 'spec: %s\n' % (
2538 entry['URL'], stringutil.forcebytestr(e)))
2538 entry['URL'], stringutil.forcebytestr(e)))
2539 continue
2539 continue
2540 # If we don't have a spec and requested a stream clone, we don't know
2540 # If we don't have a spec and requested a stream clone, we don't know
2541 # what the entry is so don't attempt to apply it.
2541 # what the entry is so don't attempt to apply it.
2542 elif streamclonerequested:
2542 elif streamclonerequested:
2543 repo.ui.debug('filtering %s because cannot determine if a stream '
2543 repo.ui.debug('filtering %s because cannot determine if a stream '
2544 'clone bundle\n' % entry['URL'])
2544 'clone bundle\n' % entry['URL'])
2545 continue
2545 continue
2546
2546
2547 if 'REQUIRESNI' in entry and not sslutil.hassni:
2547 if 'REQUIRESNI' in entry and not sslutil.hassni:
2548 repo.ui.debug('filtering %s because SNI not supported\n' %
2548 repo.ui.debug('filtering %s because SNI not supported\n' %
2549 entry['URL'])
2549 entry['URL'])
2550 continue
2550 continue
2551
2551
2552 newentries.append(entry)
2552 newentries.append(entry)
2553
2553
2554 return newentries
2554 return newentries
2555
2555
2556 class clonebundleentry(object):
2556 class clonebundleentry(object):
2557 """Represents an item in a clone bundles manifest.
2557 """Represents an item in a clone bundles manifest.
2558
2558
2559 This rich class is needed to support sorting since sorted() in Python 3
2559 This rich class is needed to support sorting since sorted() in Python 3
2560 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2560 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2561 won't work.
2561 won't work.
2562 """
2562 """
2563
2563
2564 def __init__(self, value, prefers):
2564 def __init__(self, value, prefers):
2565 self.value = value
2565 self.value = value
2566 self.prefers = prefers
2566 self.prefers = prefers
2567
2567
2568 def _cmp(self, other):
2568 def _cmp(self, other):
2569 for prefkey, prefvalue in self.prefers:
2569 for prefkey, prefvalue in self.prefers:
2570 avalue = self.value.get(prefkey)
2570 avalue = self.value.get(prefkey)
2571 bvalue = other.value.get(prefkey)
2571 bvalue = other.value.get(prefkey)
2572
2572
2573 # Special case for b missing attribute and a matches exactly.
2573 # Special case for b missing attribute and a matches exactly.
2574 if avalue is not None and bvalue is None and avalue == prefvalue:
2574 if avalue is not None and bvalue is None and avalue == prefvalue:
2575 return -1
2575 return -1
2576
2576
2577 # Special case for a missing attribute and b matches exactly.
2577 # Special case for a missing attribute and b matches exactly.
2578 if bvalue is not None and avalue is None and bvalue == prefvalue:
2578 if bvalue is not None and avalue is None and bvalue == prefvalue:
2579 return 1
2579 return 1
2580
2580
2581 # We can't compare unless attribute present on both.
2581 # We can't compare unless attribute present on both.
2582 if avalue is None or bvalue is None:
2582 if avalue is None or bvalue is None:
2583 continue
2583 continue
2584
2584
2585 # Same values should fall back to next attribute.
2585 # Same values should fall back to next attribute.
2586 if avalue == bvalue:
2586 if avalue == bvalue:
2587 continue
2587 continue
2588
2588
2589 # Exact matches come first.
2589 # Exact matches come first.
2590 if avalue == prefvalue:
2590 if avalue == prefvalue:
2591 return -1
2591 return -1
2592 if bvalue == prefvalue:
2592 if bvalue == prefvalue:
2593 return 1
2593 return 1
2594
2594
2595 # Fall back to next attribute.
2595 # Fall back to next attribute.
2596 continue
2596 continue
2597
2597
2598 # If we got here we couldn't sort by attributes and prefers. Fall
2598 # If we got here we couldn't sort by attributes and prefers. Fall
2599 # back to index order.
2599 # back to index order.
2600 return 0
2600 return 0
2601
2601
2602 def __lt__(self, other):
2602 def __lt__(self, other):
2603 return self._cmp(other) < 0
2603 return self._cmp(other) < 0
2604
2604
2605 def __gt__(self, other):
2605 def __gt__(self, other):
2606 return self._cmp(other) > 0
2606 return self._cmp(other) > 0
2607
2607
2608 def __eq__(self, other):
2608 def __eq__(self, other):
2609 return self._cmp(other) == 0
2609 return self._cmp(other) == 0
2610
2610
2611 def __le__(self, other):
2611 def __le__(self, other):
2612 return self._cmp(other) <= 0
2612 return self._cmp(other) <= 0
2613
2613
2614 def __ge__(self, other):
2614 def __ge__(self, other):
2615 return self._cmp(other) >= 0
2615 return self._cmp(other) >= 0
2616
2616
2617 def __ne__(self, other):
2617 def __ne__(self, other):
2618 return self._cmp(other) != 0
2618 return self._cmp(other) != 0
2619
2619
2620 def sortclonebundleentries(ui, entries):
2620 def sortclonebundleentries(ui, entries):
2621 prefers = ui.configlist('ui', 'clonebundleprefers')
2621 prefers = ui.configlist('ui', 'clonebundleprefers')
2622 if not prefers:
2622 if not prefers:
2623 return list(entries)
2623 return list(entries)
2624
2624
2625 prefers = [p.split('=', 1) for p in prefers]
2625 prefers = [p.split('=', 1) for p in prefers]
2626
2626
2627 items = sorted(clonebundleentry(v, prefers) for v in entries)
2627 items = sorted(clonebundleentry(v, prefers) for v in entries)
2628 return [i.value for i in items]
2628 return [i.value for i in items]
2629
2629
2630 def trypullbundlefromurl(ui, repo, url):
2630 def trypullbundlefromurl(ui, repo, url):
2631 """Attempt to apply a bundle from a URL."""
2631 """Attempt to apply a bundle from a URL."""
2632 with repo.lock(), repo.transaction('bundleurl') as tr:
2632 with repo.lock(), repo.transaction('bundleurl') as tr:
2633 try:
2633 try:
2634 fh = urlmod.open(ui, url)
2634 fh = urlmod.open(ui, url)
2635 cg = readbundle(ui, fh, 'stream')
2635 cg = readbundle(ui, fh, 'stream')
2636
2636
2637 if isinstance(cg, streamclone.streamcloneapplier):
2637 if isinstance(cg, streamclone.streamcloneapplier):
2638 cg.apply(repo)
2638 cg.apply(repo)
2639 else:
2639 else:
2640 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2640 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2641 return True
2641 return True
2642 except urlerr.httperror as e:
2642 except urlerr.httperror as e:
2643 ui.warn(_('HTTP error fetching bundle: %s\n') %
2643 ui.warn(_('HTTP error fetching bundle: %s\n') %
2644 stringutil.forcebytestr(e))
2644 stringutil.forcebytestr(e))
2645 except urlerr.urlerror as e:
2645 except urlerr.urlerror as e:
2646 ui.warn(_('error fetching bundle: %s\n') %
2646 ui.warn(_('error fetching bundle: %s\n') %
2647 stringutil.forcebytestr(e.reason))
2647 stringutil.forcebytestr(e.reason))
2648
2648
2649 return False
2649 return False
@@ -1,169 +1,253
1 **Experimental and under active development**
1 **Experimental and under active development**
2
2
3 This section documents the wire protocol commands exposed to transports
3 This section documents the wire protocol commands exposed to transports
4 using the frame-based protocol. The set of commands exposed through
4 using the frame-based protocol. The set of commands exposed through
5 these transports is distinct from the set of commands exposed to legacy
5 these transports is distinct from the set of commands exposed to legacy
6 transports.
6 transports.
7
7
8 The frame-based protocol uses CBOR to encode command execution requests.
8 The frame-based protocol uses CBOR to encode command execution requests.
9 All command arguments must be mapped to a specific or set of CBOR data
9 All command arguments must be mapped to a specific or set of CBOR data
10 types.
10 types.
11
11
12 The response to many commands is also CBOR. There is no common response
12 The response to many commands is also CBOR. There is no common response
13 format: each command defines its own response format.
13 format: each command defines its own response format.
14
14
15 TODOs
15 TODOs
16 =====
16 =====
17
17
18 * Add "node namespace" support to each command. In order to support
18 * Add "node namespace" support to each command. In order to support
19 SHA-1 hash transition, we want servers to be able to expose different
19 SHA-1 hash transition, we want servers to be able to expose different
20 "node namespaces" for the same data. Every command operating on nodes
20 "node namespaces" for the same data. Every command operating on nodes
21 should specify which "node namespace" it is operating on and responses
21 should specify which "node namespace" it is operating on and responses
22 should encode the "node namespace" accordingly.
22 should encode the "node namespace" accordingly.
23
23
24 Commands
24 Commands
25 ========
25 ========
26
26
27 The sections below detail all commands available to wire protocol version
27 The sections below detail all commands available to wire protocol version
28 2.
28 2.
29
29
30 branchmap
30 branchmap
31 ---------
31 ---------
32
32
33 Obtain heads in named branches.
33 Obtain heads in named branches.
34
34
35 Receives no arguments.
35 Receives no arguments.
36
36
37 The response is a map with bytestring keys defining the branch name.
37 The response is a map with bytestring keys defining the branch name.
38 Values are arrays of bytestring defining raw changeset nodes.
38 Values are arrays of bytestring defining raw changeset nodes.
39
39
40 capabilities
40 capabilities
41 ------------
41 ------------
42
42
43 Obtain the server's capabilities.
43 Obtain the server's capabilities.
44
44
45 Receives no arguments.
45 Receives no arguments.
46
46
47 This command is typically called only as part of the handshake during
47 This command is typically called only as part of the handshake during
48 initial connection establishment.
48 initial connection establishment.
49
49
50 The response is a map with bytestring keys defining server information.
50 The response is a map with bytestring keys defining server information.
51
51
52 The defined keys are:
52 The defined keys are:
53
53
54 commands
54 commands
55 A map defining available wire protocol commands on this server.
55 A map defining available wire protocol commands on this server.
56
56
57 Keys in the map are the names of commands that can be invoked. Values
57 Keys in the map are the names of commands that can be invoked. Values
58 are maps defining information about that command. The bytestring keys
58 are maps defining information about that command. The bytestring keys
59 are:
59 are:
60
60
61 args
61 args
62 A map of argument names and their expected types.
62 A map of argument names and their expected types.
63
63
64 Types are defined as a representative value for the expected type.
64 Types are defined as a representative value for the expected type.
65 e.g. an argument expecting a boolean type will have its value
65 e.g. an argument expecting a boolean type will have its value
66 set to true. An integer type will have its value set to 42. The
66 set to true. An integer type will have its value set to 42. The
67 actual values are arbitrary and may not have meaning.
67 actual values are arbitrary and may not have meaning.
68 permissions
68 permissions
69 An array of permissions required to execute this command.
69 An array of permissions required to execute this command.
70
70
71 compression
71 compression
72 An array of maps defining available compression format support.
72 An array of maps defining available compression format support.
73
73
74 The array is sorted from most preferred to least preferred.
74 The array is sorted from most preferred to least preferred.
75
75
76 Each entry has the following bytestring keys:
76 Each entry has the following bytestring keys:
77
77
78 name
78 name
79 Name of the compression engine. e.g. ``zstd`` or ``zlib``.
79 Name of the compression engine. e.g. ``zstd`` or ``zlib``.
80
80
81 framingmediatypes
81 framingmediatypes
82 An array of bytestrings defining the supported framing protocol
82 An array of bytestrings defining the supported framing protocol
83 media types. Servers will not accept media types not in this list.
83 media types. Servers will not accept media types not in this list.
84
84
85 rawrepoformats
85 rawrepoformats
86 An array of storage formats the repository is using. This set of
86 An array of storage formats the repository is using. This set of
87 requirements can be used to determine whether a client can read a
87 requirements can be used to determine whether a client can read a
88 *raw* copy of file data available.
88 *raw* copy of file data available.
89
89
90 changesetdata
91 -------------
92
93 Obtain various data related to changesets.
94
95 The command accepts the following arguments:
96
97 noderange
98 (array of arrays of bytestrings) An array of 2 elements, each being an
99 array of node bytestrings. The first array denotes the changelog revisions
100 that are already known to the client. The second array denotes the changelog
101 revision DAG heads to fetch. The argument essentially defines a DAG range
102 bounded by root and head nodes to fetch.
103
104 The roots array may be empty. The heads array must be defined.
105
106 nodes
107 (array of bytestrings) Changelog revisions to request explicitly.
108
109 fields
110 (set of bytestring) Which data associated with changelog revisions to
111 fetch. The following values are recognized:
112
113 parents
114 Parent revisions.
115
116 revision
117 The raw, revision data for the changelog entry. The hash of this data
118 will match the revision's node value.
119
120 The server resolves the set of revisions relevant to the request by taking
121 the union of the ``noderange`` and ``nodes`` arguments. At least one of these
122 arguments must be defined.
123
124 The response bytestream starts with a CBOR map describing the data that follows.
125 This map has the following bytestring keys:
126
127 totalitems
128 (unsigned integer) Total number of changelog revisions whose data is being
129 transferred.
130
131 Following the map header is a series of 0 or more CBOR values. If values
132 are present, the first value will always be a map describing a single changeset
133 revision. If revision data is requested, the raw revision data (encoded as
134 a CBOR bytestring) will follow the map describing it. Otherwise, another CBOR
135 map describing the next changeset revision will occur.
136
137 Each map has the following bytestring keys:
138
139 node
140 (bytestring) The node value for this revision. This is the SHA-1 hash of
141 the raw revision data.
142
143 parents (optional)
144 (array of bytestrings) The nodes representing the parent revisions of this
145 revision. Only present if ``parents`` data is being requested.
146
147 revisionsize (optional)
148 (unsigned integer) Indicates the size of raw revision data that follows this
149 map. The following data contains a serialized form of the changeset data,
150 including the author, date, commit message, set of changed files, manifest
151 node, and other metadata.
152
153 Only present if ``revision`` data was requested and the data follows this
154 map.
155
156 If nodes are requested via ``noderange``, they will be emitted in DAG order,
157 parents always before children.
158
159 If nodes are requested via ``nodes``, they will be emitted in requested order.
160
161 Nodes from ``nodes`` are emitted before nodes from ``noderange``.
162
163 TODO support different revision selection mechanisms (e.g. non-public, specific
164 revisions)
165 TODO support different hash "namespaces" for revisions (e.g. sha-1 versus other)
166 TODO support emitting phases data
167 TODO support emitting bookmarks data
168 TODO support emitting obsolescence data
169 TODO support filtering based on relevant paths (narrow clone)
170 TODO support depth limiting
171 TODO support hgtagsfnodes cache / tags data
172 TODO support branch heads cache
173
90 heads
174 heads
91 -----
175 -----
92
176
93 Obtain DAG heads in the repository.
177 Obtain DAG heads in the repository.
94
178
95 The command accepts the following arguments:
179 The command accepts the following arguments:
96
180
97 publiconly (optional)
181 publiconly (optional)
98 (boolean) If set, operate on the DAG for public phase changesets only.
182 (boolean) If set, operate on the DAG for public phase changesets only.
99 Non-public (i.e. draft) phase DAG heads will not be returned.
183 Non-public (i.e. draft) phase DAG heads will not be returned.
100
184
101 The response is a CBOR array of bytestrings defining changeset nodes
185 The response is a CBOR array of bytestrings defining changeset nodes
102 of DAG heads. The array can be empty if the repository is empty or no
186 of DAG heads. The array can be empty if the repository is empty or no
103 changesets satisfied the request.
187 changesets satisfied the request.
104
188
105 TODO consider exposing phase of heads in response
189 TODO consider exposing phase of heads in response
106
190
107 known
191 known
108 -----
192 -----
109
193
110 Determine whether a series of changeset nodes is known to the server.
194 Determine whether a series of changeset nodes is known to the server.
111
195
112 The command accepts the following arguments:
196 The command accepts the following arguments:
113
197
114 nodes
198 nodes
115 (array of bytestrings) List of changeset nodes whose presence to
199 (array of bytestrings) List of changeset nodes whose presence to
116 query.
200 query.
117
201
118 The response is a bytestring where each byte contains a 0 or 1 for the
202 The response is a bytestring where each byte contains a 0 or 1 for the
119 corresponding requested node at the same index.
203 corresponding requested node at the same index.
120
204
121 TODO use a bit array for even more compact response
205 TODO use a bit array for even more compact response
122
206
123 listkeys
207 listkeys
124 --------
208 --------
125
209
126 List values in a specified ``pushkey`` namespace.
210 List values in a specified ``pushkey`` namespace.
127
211
128 The command receives the following arguments:
212 The command receives the following arguments:
129
213
130 namespace
214 namespace
131 (bytestring) Pushkey namespace to query.
215 (bytestring) Pushkey namespace to query.
132
216
133 The response is a map with bytestring keys and values.
217 The response is a map with bytestring keys and values.
134
218
135 TODO consider using binary to represent nodes in certain pushkey namespaces.
219 TODO consider using binary to represent nodes in certain pushkey namespaces.
136
220
137 lookup
221 lookup
138 ------
222 ------
139
223
140 Try to resolve a value to a changeset revision.
224 Try to resolve a value to a changeset revision.
141
225
142 Unlike ``known`` which operates on changeset nodes, lookup operates on
226 Unlike ``known`` which operates on changeset nodes, lookup operates on
143 node fragments and other names that a user may use.
227 node fragments and other names that a user may use.
144
228
145 The command receives the following arguments:
229 The command receives the following arguments:
146
230
147 key
231 key
148 (bytestring) Value to try to resolve.
232 (bytestring) Value to try to resolve.
149
233
150 On success, returns a bytestring containing the resolved node.
234 On success, returns a bytestring containing the resolved node.
151
235
152 pushkey
236 pushkey
153 -------
237 -------
154
238
155 Set a value using the ``pushkey`` protocol.
239 Set a value using the ``pushkey`` protocol.
156
240
157 The command receives the following arguments:
241 The command receives the following arguments:
158
242
159 namespace
243 namespace
160 (bytestring) Pushkey namespace to operate on.
244 (bytestring) Pushkey namespace to operate on.
161 key
245 key
162 (bytestring) The pushkey key to set.
246 (bytestring) The pushkey key to set.
163 old
247 old
164 (bytestring) Old value for this key.
248 (bytestring) Old value for this key.
165 new
249 new
166 (bytestring) New value for this key.
250 (bytestring) New value for this key.
167
251
168 TODO consider using binary to represent nodes is certain pushkey namespaces.
252 TODO consider using binary to represent nodes is certain pushkey namespaces.
169 TODO better define response type and meaning.
253 TODO better define response type and meaning.
@@ -1,1007 +1,1006
1 # httppeer.py - HTTP repository proxy classes for mercurial
1 # httppeer.py - HTTP repository proxy classes for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import io
12 import io
13 import os
13 import os
14 import socket
14 import socket
15 import struct
15 import struct
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 bundle2,
20 bundle2,
21 error,
21 error,
22 httpconnection,
22 httpconnection,
23 pycompat,
23 pycompat,
24 repository,
24 repository,
25 statichttprepo,
25 statichttprepo,
26 url as urlmod,
26 url as urlmod,
27 util,
27 util,
28 wireprotoframing,
28 wireprotoframing,
29 wireprototypes,
29 wireprototypes,
30 wireprotov1peer,
30 wireprotov1peer,
31 wireprotov2peer,
31 wireprotov2peer,
32 wireprotov2server,
32 wireprotov2server,
33 )
33 )
34 from .utils import (
34 from .utils import (
35 cborutil,
35 cborutil,
36 interfaceutil,
36 interfaceutil,
37 stringutil,
37 stringutil,
38 )
38 )
39
39
40 httplib = util.httplib
40 httplib = util.httplib
41 urlerr = util.urlerr
41 urlerr = util.urlerr
42 urlreq = util.urlreq
42 urlreq = util.urlreq
43
43
44 def encodevalueinheaders(value, header, limit):
44 def encodevalueinheaders(value, header, limit):
45 """Encode a string value into multiple HTTP headers.
45 """Encode a string value into multiple HTTP headers.
46
46
47 ``value`` will be encoded into 1 or more HTTP headers with the names
47 ``value`` will be encoded into 1 or more HTTP headers with the names
48 ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
48 ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
49 name + value will be at most ``limit`` bytes long.
49 name + value will be at most ``limit`` bytes long.
50
50
51 Returns an iterable of 2-tuples consisting of header names and
51 Returns an iterable of 2-tuples consisting of header names and
52 values as native strings.
52 values as native strings.
53 """
53 """
54 # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
54 # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
55 # not bytes. This function always takes bytes in as arguments.
55 # not bytes. This function always takes bytes in as arguments.
56 fmt = pycompat.strurl(header) + r'-%s'
56 fmt = pycompat.strurl(header) + r'-%s'
57 # Note: it is *NOT* a bug that the last bit here is a bytestring
57 # Note: it is *NOT* a bug that the last bit here is a bytestring
58 # and not a unicode: we're just getting the encoded length anyway,
58 # and not a unicode: we're just getting the encoded length anyway,
59 # and using an r-string to make it portable between Python 2 and 3
59 # and using an r-string to make it portable between Python 2 and 3
60 # doesn't work because then the \r is a literal backslash-r
60 # doesn't work because then the \r is a literal backslash-r
61 # instead of a carriage return.
61 # instead of a carriage return.
62 valuelen = limit - len(fmt % r'000') - len(': \r\n')
62 valuelen = limit - len(fmt % r'000') - len(': \r\n')
63 result = []
63 result = []
64
64
65 n = 0
65 n = 0
66 for i in pycompat.xrange(0, len(value), valuelen):
66 for i in pycompat.xrange(0, len(value), valuelen):
67 n += 1
67 n += 1
68 result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
68 result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
69
69
70 return result
70 return result
71
71
72 def _wraphttpresponse(resp):
72 def _wraphttpresponse(resp):
73 """Wrap an HTTPResponse with common error handlers.
73 """Wrap an HTTPResponse with common error handlers.
74
74
75 This ensures that any I/O from any consumer raises the appropriate
75 This ensures that any I/O from any consumer raises the appropriate
76 error and messaging.
76 error and messaging.
77 """
77 """
78 origread = resp.read
78 origread = resp.read
79
79
80 class readerproxy(resp.__class__):
80 class readerproxy(resp.__class__):
81 def read(self, size=None):
81 def read(self, size=None):
82 try:
82 try:
83 return origread(size)
83 return origread(size)
84 except httplib.IncompleteRead as e:
84 except httplib.IncompleteRead as e:
85 # e.expected is an integer if length known or None otherwise.
85 # e.expected is an integer if length known or None otherwise.
86 if e.expected:
86 if e.expected:
87 got = len(e.partial)
87 got = len(e.partial)
88 total = e.expected + got
88 total = e.expected + got
89 msg = _('HTTP request error (incomplete response; '
89 msg = _('HTTP request error (incomplete response; '
90 'expected %d bytes got %d)') % (total, got)
90 'expected %d bytes got %d)') % (total, got)
91 else:
91 else:
92 msg = _('HTTP request error (incomplete response)')
92 msg = _('HTTP request error (incomplete response)')
93
93
94 raise error.PeerTransportError(
94 raise error.PeerTransportError(
95 msg,
95 msg,
96 hint=_('this may be an intermittent network failure; '
96 hint=_('this may be an intermittent network failure; '
97 'if the error persists, consider contacting the '
97 'if the error persists, consider contacting the '
98 'network or server operator'))
98 'network or server operator'))
99 except httplib.HTTPException as e:
99 except httplib.HTTPException as e:
100 raise error.PeerTransportError(
100 raise error.PeerTransportError(
101 _('HTTP request error (%s)') % e,
101 _('HTTP request error (%s)') % e,
102 hint=_('this may be an intermittent network failure; '
102 hint=_('this may be an intermittent network failure; '
103 'if the error persists, consider contacting the '
103 'if the error persists, consider contacting the '
104 'network or server operator'))
104 'network or server operator'))
105
105
106 resp.__class__ = readerproxy
106 resp.__class__ = readerproxy
107
107
108 class _multifile(object):
108 class _multifile(object):
109 def __init__(self, *fileobjs):
109 def __init__(self, *fileobjs):
110 for f in fileobjs:
110 for f in fileobjs:
111 if not util.safehasattr(f, 'length'):
111 if not util.safehasattr(f, 'length'):
112 raise ValueError(
112 raise ValueError(
113 '_multifile only supports file objects that '
113 '_multifile only supports file objects that '
114 'have a length but this one does not:', type(f), f)
114 'have a length but this one does not:', type(f), f)
115 self._fileobjs = fileobjs
115 self._fileobjs = fileobjs
116 self._index = 0
116 self._index = 0
117
117
118 @property
118 @property
119 def length(self):
119 def length(self):
120 return sum(f.length for f in self._fileobjs)
120 return sum(f.length for f in self._fileobjs)
121
121
122 def read(self, amt=None):
122 def read(self, amt=None):
123 if amt <= 0:
123 if amt <= 0:
124 return ''.join(f.read() for f in self._fileobjs)
124 return ''.join(f.read() for f in self._fileobjs)
125 parts = []
125 parts = []
126 while amt and self._index < len(self._fileobjs):
126 while amt and self._index < len(self._fileobjs):
127 parts.append(self._fileobjs[self._index].read(amt))
127 parts.append(self._fileobjs[self._index].read(amt))
128 got = len(parts[-1])
128 got = len(parts[-1])
129 if got < amt:
129 if got < amt:
130 self._index += 1
130 self._index += 1
131 amt -= got
131 amt -= got
132 return ''.join(parts)
132 return ''.join(parts)
133
133
134 def seek(self, offset, whence=os.SEEK_SET):
134 def seek(self, offset, whence=os.SEEK_SET):
135 if whence != os.SEEK_SET:
135 if whence != os.SEEK_SET:
136 raise NotImplementedError(
136 raise NotImplementedError(
137 '_multifile does not support anything other'
137 '_multifile does not support anything other'
138 ' than os.SEEK_SET for whence on seek()')
138 ' than os.SEEK_SET for whence on seek()')
139 if offset != 0:
139 if offset != 0:
140 raise NotImplementedError(
140 raise NotImplementedError(
141 '_multifile only supports seeking to start, but that '
141 '_multifile only supports seeking to start, but that '
142 'could be fixed if you need it')
142 'could be fixed if you need it')
143 for f in self._fileobjs:
143 for f in self._fileobjs:
144 f.seek(0)
144 f.seek(0)
145 self._index = 0
145 self._index = 0
146
146
def makev1commandrequest(ui, requestbuilder, caps, capablefn,
                         repobaseurl, cmd, args):
    """Make an HTTP request to run a command for a version 1 client.

    ``caps`` is a set of known server capabilities. The value may be
    None if capabilities are not yet known.

    ``capablefn`` is a function to evaluate a capability.

    ``cmd``, ``args``, and ``data`` define the command, its arguments, and
    raw data to pass to it.

    Returns a ``(request, url, querystring)`` tuple where ``request`` is
    the object produced by ``requestbuilder``.
    """
    if cmd == 'pushkey':
        # NOTE(review): ensures a 'data' key exists for pushkey so the
        # request carries a (possibly empty) body — confirm against the
        # server-side pushkey handler.
        args['data'] = ''
    data = args.pop('data', None)
    headers = args.pop('headers', {})

    ui.debug("sending %s command\n" % cmd)
    q = [('cmd', cmd)]
    headersize = 0
    # Important: don't use self.capable() here or else you end up
    # with infinite recursion when trying to look up capabilities
    # for the first time.
    postargsok = caps is not None and 'httppostargs' in caps

    # Send arguments via POST.
    if postargsok and args:
        strargs = urlreq.urlencode(sorted(args.items()))
        if not data:
            data = strargs
        else:
            # Prepend the encoded arguments to the existing body; both
            # pieces need a ``length`` attribute for _multifile.
            if isinstance(data, bytes):
                i = io.BytesIO(data)
                i.length = len(data)
                data = i
            argsio = io.BytesIO(strargs)
            argsio.length = len(strargs)
            data = _multifile(argsio, data)
        headers[r'X-HgArgs-Post'] = len(strargs)
    elif args:
        # Calling self.capable() can infinite loop if we are calling
        # "capabilities". But that command should never accept wire
        # protocol arguments. So this should never happen.
        assert cmd != 'capabilities'
        httpheader = capablefn('httpheader')
        if httpheader:
            # First comma-separated field is the advertised maximum
            # header size.
            headersize = int(httpheader.split(',', 1)[0])

        # Send arguments via HTTP headers.
        if headersize > 0:
            # The headers can typically carry more data than the URL.
            encargs = urlreq.urlencode(sorted(args.items()))
            for header, value in encodevalueinheaders(encargs, 'X-HgArg',
                                                      headersize):
                headers[header] = value
        # Send arguments via query string (Mercurial <1.9).
        else:
            q += sorted(args.items())

    qs = '?%s' % urlreq.urlencode(q)
    cu = "%s%s" % (repobaseurl, qs)
    size = 0
    if util.safehasattr(data, 'length'):
        size = data.length
    elif data is not None:
        size = len(data)
    if data is not None and r'Content-Type' not in headers:
        headers[r'Content-Type'] = r'application/mercurial-0.1'

    # Tell the server we accept application/mercurial-0.2 and multiple
    # compression formats if the server is capable of emitting those
    # payloads.
    # Note: Keep this set empty by default, as client advertisement of
    # protocol parameters should only occur after the handshake.
    protoparams = set()

    mediatypes = set()
    if caps is not None:
        mt = capablefn('httpmediatype')
        if mt:
            protoparams.add('0.1')
            mediatypes = set(mt.split(','))

        protoparams.add('partial-pull')

    if '0.2tx' in mediatypes:
        protoparams.add('0.2')

    if '0.2tx' in mediatypes and capablefn('compression'):
        # We /could/ compare supported compression formats and prune
        # non-mutually supported or error if nothing is mutually supported.
        # For now, send the full list to the server and have it error.
        comps = [e.wireprotosupport().name for e in
                 util.compengines.supportedwireengines(util.CLIENTROLE)]
        protoparams.add('comp=%s' % ','.join(comps))

    if protoparams:
        protoheaders = encodevalueinheaders(' '.join(sorted(protoparams)),
                                            'X-HgProto',
                                            headersize or 1024)
        for header, value in protoheaders:
            headers[header] = value

    # Any X-Hg* header influences the response, so advertise them in Vary
    # for the benefit of caches.
    varyheaders = []
    for header in headers:
        if header.lower().startswith(r'x-hg'):
            varyheaders.append(header)

    if varyheaders:
        headers[r'Vary'] = r','.join(sorted(varyheaders))

    req = requestbuilder(pycompat.strurl(cu), data, headers)

    if data is not None:
        ui.debug("sending %d bytes\n" % size)
        req.add_unredirected_header(r'Content-Length', r'%d' % size)

    return req, cu, qs
265
265
def _reqdata(req):
    """Return the request's payload, or None when there is none."""
    if pycompat.ispy3:
        # Python 3's urllib Request exposes the body directly; it is
        # None when no payload was set.
        return req.data
    # Python 2's urllib2 API uses the has_data()/get_data() pair.
    return req.get_data() if req.has_data() else None
273
273
def sendrequest(ui, opener, req):
    """Send a prepared HTTP request.

    Returns the response object.

    When ``devel.debug.peer-request`` is enabled, emits trace lines for
    the method/URL, headers (X-hgarg-* headers summarized as a byte
    count), body size, and elapsed time.
    """
    dbg = ui.debug
    if (ui.debugflag
        and ui.configbool('devel', 'debug.peer-request')):
        line = 'devel-peer-request: %s\n'
        dbg(line % '%s %s' % (pycompat.bytesurl(req.get_method()),
                              pycompat.bytesurl(req.get_full_url())))
        # Accumulates the total size of X-hgarg-* headers; None means no
        # such header was seen.
        hgargssize = None

        for header, value in sorted(req.header_items()):
            header = pycompat.bytesurl(header)
            value = pycompat.bytesurl(value)
            if header.startswith('X-hgarg-'):
                if hgargssize is None:
                    hgargssize = 0
                hgargssize += len(value)
            else:
                dbg(line % '  %s %s' % (header, value))

        if hgargssize is not None:
            dbg(line % '  %d bytes of commands arguments in headers'
                % hgargssize)
        data = _reqdata(req)
        if data is not None:
            length = getattr(data, 'length', None)
            if length is None:
                length = len(data)
            dbg(line % '  %d bytes of data' % length)

    start = util.timer()

    res = None
    try:
        res = opener.open(req)
    except urlerr.httperror as inst:
        if inst.code == 401:
            raise error.Abort(_('authorization failed'))
        raise
    except httplib.HTTPException as inst:
        ui.debug('http error requesting %s\n' %
                 util.hidepassword(req.get_full_url()))
        ui.traceback()
        raise IOError(None, inst)
    finally:
        # ``line`` is only bound when the same config guard above was
        # true, so this re-check keeps the finally block safe.
        if ui.debugflag and ui.configbool('devel', 'debug.peer-request'):
            code = res.code if res else -1
            dbg(line % '  finished in %.4f seconds (%d)'
                % (util.timer() - start, code))

    # Insert error handlers for common I/O failures.
    _wraphttpresponse(res)

    return res
331
331
class RedirectedRepoError(error.RepoError):
    """RepoError variant raised when the server redirected the request.

    ``respurl`` records the final (redirected) URL of the response so
    callers can react to the new location.
    """
    def __init__(self, msg, respurl):
        super(RedirectedRepoError, self).__init__(msg)
        self.respurl = respurl
336
336
def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible,
                           allowcbor=False):
    """Parse the HTTP response to a version 1 wire protocol command.

    Validates the Content-Type, surfaces out-of-band server errors, and
    wraps the response in a decompressing reader where the media type
    calls for it.

    Returns a ``(respurl, proto, resp)`` tuple. Raises
    ``RedirectedRepoError`` when a redirect dropped the query string,
    and ``error.RepoError`` for other protocol problems.
    """
    # record the url we got redirected to
    redirected = False
    respurl = pycompat.bytesurl(resp.geturl())
    if respurl.endswith(qs):
        respurl = respurl[:-len(qs)]
        qsdropped = False
    else:
        # The redirect target no longer carries our query string.
        qsdropped = True

    if baseurl.rstrip('/') != respurl.rstrip('/'):
        redirected = True
        if not ui.quiet:
            ui.warn(_('real URL is %s\n') % respurl)

    try:
        proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))
    except AttributeError:
        # Fallback for response objects without getheader().
        proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))

    safeurl = util.hidepassword(baseurl)
    if proto.startswith('application/hg-error'):
        raise error.OutOfBandError(resp.read())

    # Pre 1.0 versions of Mercurial used text/plain and
    # application/hg-changegroup. We don't support such old servers.
    if not proto.startswith('application/mercurial-'):
        ui.debug("requested URL: '%s'\n" % util.hidepassword(requrl))
        msg = _("'%s' does not appear to be an hg repository:\n"
                "---%%<--- (%s)\n%s\n---%%<---\n") % (
            safeurl, proto or 'no content-type', resp.read(1024))

        # Some servers may strip the query string from the redirect. We
        # raise a special error type so callers can react to this specially.
        if redirected and qsdropped:
            raise RedirectedRepoError(msg, respurl)
        else:
            raise error.RepoError(msg)

    try:
        subtype = proto.split('-', 1)[1]

        # Unless we end up supporting CBOR in the legacy wire protocol,
        # this should ONLY be encountered for the initial capabilities
        # request during handshake.
        if subtype == 'cbor':
            if allowcbor:
                return respurl, proto, resp
            else:
                raise error.RepoError(_('unexpected CBOR response from '
                                        'server'))

        version_info = tuple([int(n) for n in subtype.split('.')])
    except ValueError:
        raise error.RepoError(_("'%s' sent a broken Content-Type "
                                "header (%s)") % (safeurl, proto))

    # TODO consider switching to a decompression reader that uses
    # generators.
    if version_info == (0, 1):
        if compressible:
            resp = util.compengines['zlib'].decompressorreader(resp)

    elif version_info == (0, 2):
        # application/mercurial-0.2 always identifies the compression
        # engine in the payload header.
        elen = struct.unpack('B', util.readexactly(resp, 1))[0]
        ename = util.readexactly(resp, elen)
        engine = util.compengines.forwiretype(ename)

        resp = engine.decompressorreader(resp)
    else:
        raise error.RepoError(_("'%s' uses newer protocol %s") %
                              (safeurl, subtype))

    return respurl, proto, resp
414
414
class httppeer(wireprotov1peer.wirepeer):
    """Peer for communicating with a server over HTTP using wire
    protocol version 1."""

    def __init__(self, ui, path, url, opener, requestbuilder, caps):
        self.ui = ui
        self._path = path
        self._url = url
        self._caps = caps
        self._urlopener = opener
        self._requestbuilder = requestbuilder

    def __del__(self):
        # Tear down keepalive handlers so sockets aren't leaked; not all
        # handlers have close_all(), hence the getattr fallback.
        for h in self._urlopener.handlers:
            h.close()
            getattr(h, "close_all", lambda: None)()

    # Begin of ipeerconnection interface.

    def url(self):
        return self._path

    def local(self):
        return None

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        pass

    # End of ipeerconnection interface.

    # Begin of ipeercommands interface.

    def capabilities(self):
        return self._caps

    # End of ipeercommands interface.

    def _callstream(self, cmd, _compressible=False, **args):
        """Issue ``cmd`` and return a file-like response object."""
        args = pycompat.byteskwargs(args)

        req, cu, qs = makev1commandrequest(self.ui, self._requestbuilder,
                                           self._caps, self.capable,
                                           self._url, cmd, args)

        resp = sendrequest(self.ui, self._urlopener, req)

        # parsev1commandresponse() may report a redirected base URL;
        # remember it for subsequent requests.
        self._url, ct, resp = parsev1commandresponse(self.ui, self._url, cu, qs,
                                                     resp, _compressible)

        return resp

    def _call(self, cmd, **args):
        """Issue ``cmd`` and return the full response body as bytes."""
        fp = self._callstream(cmd, **args)
        try:
            return fp.read()
        finally:
            # if using keepalive, allow connection to be reused
            fp.close()

    def _callpush(self, cmd, cg, **args):
        # have to stream bundle to a temp file because we do not have
        # http 1.1 chunked transfer.

        types = self.capable('unbundle')
        try:
            types = types.split(',')
        except AttributeError:
            # servers older than d1b16a746db6 will send 'unbundle' as a
            # boolean capability. They only support headerless/uncompressed
            # bundles.
            types = [""]
        # NOTE(review): ``type`` is left unbound if none of the advertised
        # types is in bundle2.bundletypes — confirm a match is guaranteed.
        for x in types:
            if x in bundle2.bundletypes:
                type = x
                break

        tempname = bundle2.writebundle(self.ui, cg, None, type)
        fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
        headers = {r'Content-Type': r'application/mercurial-0.1'}

        try:
            r = self._call(cmd, data=fp, headers=headers, **args)
            vals = r.split('\n', 1)
            if len(vals) < 2:
                raise error.ResponseError(_("unexpected response:"), r)
            return vals
        except urlerr.httperror:
            # Catch and re-raise these so we don't try and treat them
            # like generic socket errors. They lack any values in
            # .args on Python 3 which breaks our socket.error block.
            raise
        except socket.error as err:
            if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
                raise error.Abort(_('push failed: %s') % err.args[1])
            raise error.Abort(err.args[1])
        finally:
            fp.close()
            os.unlink(tempname)

    def _calltwowaystream(self, cmd, fp, **args):
        """Send the contents of ``fp`` as the request body and stream the
        response back; the payload is spooled to a temp file first."""
        fh = None
        fp_ = None
        filename = None
        try:
            # dump bundle to disk
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
            d = fp.read(4096)
            while d:
                fh.write(d)
                d = fp.read(4096)
            fh.close()
            # start http push
            fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
            headers = {r'Content-Type': r'application/mercurial-0.1'}
            return self._callstream(cmd, data=fp_, headers=headers, **args)
        finally:
            if fp_ is not None:
                fp_.close()
            if fh is not None:
                fh.close()
                os.unlink(filename)

    def _callcompressable(self, cmd, **args):
        # Same as _callstream() but allows the response to be compressed.
        return self._callstream(cmd, _compressible=True, **args)

    def _abort(self, exception):
        raise exception
546
546
def sendv2request(ui, opener, requestbuilder, apiurl, permission, requests):
    """Send a batch of wire protocol version 2 commands in one HTTP request.

    ``requests`` is a list of ``(command, args, future)`` tuples. All
    commands are buffered into a single framed body and POSTed to the
    permission-scoped URL.

    Returns a ``(handler, response)`` tuple where ``handler`` is the
    client reactor handler used to decode response frames.
    """
    reactor = wireprotoframing.clientreactor(hasmultiplesend=False,
                                             buffersends=True)

    handler = wireprotov2peer.clienthandler(ui, reactor)

    url = '%s/%s' % (apiurl, permission)

    # Single commands go to a command-named endpoint; batches use the
    # multirequest endpoint.
    if len(requests) > 1:
        url += '/multirequest'
    else:
        url += '/%s' % requests[0][0]

    ui.debug('sending %d commands\n' % len(requests))
    for command, args, f in requests:
        ui.debug('sending command %s: %s\n' % (
            command, stringutil.pprint(args, indent=2)))
        # With buffersends=True no frames should be emitted yet.
        assert not list(handler.callcommand(command, args, f))

    # TODO stream this.
    body = b''.join(map(bytes, handler.flushcommands()))

    # TODO modify user-agent to reflect v2
    headers = {
        r'Accept': wireprotov2server.FRAMINGTYPE,
        r'Content-Type': wireprotov2server.FRAMINGTYPE,
    }

    req = requestbuilder(pycompat.strurl(url), body, headers)
    req.add_unredirected_header(r'Content-Length', r'%d' % len(body))

    try:
        res = opener.open(req)
    except urlerr.httperror as e:
        if e.code == 401:
            raise error.Abort(_('authorization failed'))

        raise
    except httplib.HTTPException as e:
        ui.traceback()
        raise IOError(None, e)

    return handler, res
590
590
class queuedcommandfuture(pycompat.futures.Future):
    """Wraps result() on command futures to trigger submission on call."""

    def result(self, timeout=None):
        if self.done():
            return pycompat.futures.Future.result(self, timeout)

        # ``_peerexecutor`` is attached externally by the executor that
        # created this future (not defined on this class).
        self._peerexecutor.sendcommands()

        # sendcommands() will restore the original __class__ and self.result
        # will resolve to Future.result.
        return self.result(timeout)
603
603
604 @interfaceutil.implementer(repository.ipeercommandexecutor)
604 @interfaceutil.implementer(repository.ipeercommandexecutor)
605 class httpv2executor(object):
605 class httpv2executor(object):
606 def __init__(self, ui, opener, requestbuilder, apiurl, descriptor):
606 def __init__(self, ui, opener, requestbuilder, apiurl, descriptor):
607 self._ui = ui
607 self._ui = ui
608 self._opener = opener
608 self._opener = opener
609 self._requestbuilder = requestbuilder
609 self._requestbuilder = requestbuilder
610 self._apiurl = apiurl
610 self._apiurl = apiurl
611 self._descriptor = descriptor
611 self._descriptor = descriptor
612 self._sent = False
612 self._sent = False
613 self._closed = False
613 self._closed = False
614 self._neededpermissions = set()
614 self._neededpermissions = set()
615 self._calls = []
615 self._calls = []
616 self._futures = weakref.WeakSet()
616 self._futures = weakref.WeakSet()
617 self._responseexecutor = None
617 self._responseexecutor = None
618 self._responsef = None
618 self._responsef = None
619
619
620 def __enter__(self):
620 def __enter__(self):
621 return self
621 return self
622
622
623 def __exit__(self, exctype, excvalue, exctb):
623 def __exit__(self, exctype, excvalue, exctb):
624 self.close()
624 self.close()
625
625
626 def callcommand(self, command, args):
626 def callcommand(self, command, args):
627 if self._sent:
627 if self._sent:
628 raise error.ProgrammingError('callcommand() cannot be used after '
628 raise error.ProgrammingError('callcommand() cannot be used after '
629 'commands are sent')
629 'commands are sent')
630
630
631 if self._closed:
631 if self._closed:
632 raise error.ProgrammingError('callcommand() cannot be used after '
632 raise error.ProgrammingError('callcommand() cannot be used after '
633 'close()')
633 'close()')
634
634
635 # The service advertises which commands are available. So if we attempt
635 # The service advertises which commands are available. So if we attempt
636 # to call an unknown command or pass an unknown argument, we can screen
636 # to call an unknown command or pass an unknown argument, we can screen
637 # for this.
637 # for this.
638 if command not in self._descriptor['commands']:
638 if command not in self._descriptor['commands']:
639 raise error.ProgrammingError(
639 raise error.ProgrammingError(
640 'wire protocol command %s is not available' % command)
640 'wire protocol command %s is not available' % command)
641
641
642 cmdinfo = self._descriptor['commands'][command]
642 cmdinfo = self._descriptor['commands'][command]
643 unknownargs = set(args.keys()) - set(cmdinfo.get('args', {}))
643 unknownargs = set(args.keys()) - set(cmdinfo.get('args', {}))
644
644
645 if unknownargs:
645 if unknownargs:
646 raise error.ProgrammingError(
646 raise error.ProgrammingError(
647 'wire protocol command %s does not accept argument: %s' % (
647 'wire protocol command %s does not accept argument: %s' % (
648 command, ', '.join(sorted(unknownargs))))
648 command, ', '.join(sorted(unknownargs))))
649
649
650 self._neededpermissions |= set(cmdinfo['permissions'])
650 self._neededpermissions |= set(cmdinfo['permissions'])
651
651
652 # TODO we /could/ also validate types here, since the API descriptor
652 # TODO we /could/ also validate types here, since the API descriptor
653 # includes types...
653 # includes types...
654
654
655 f = pycompat.futures.Future()
655 f = pycompat.futures.Future()
656
656
657 # Monkeypatch it so result() triggers sendcommands(), otherwise result()
657 # Monkeypatch it so result() triggers sendcommands(), otherwise result()
658 # could deadlock.
658 # could deadlock.
659 f.__class__ = queuedcommandfuture
659 f.__class__ = queuedcommandfuture
660 f._peerexecutor = self
660 f._peerexecutor = self
661
661
662 self._futures.add(f)
662 self._futures.add(f)
663 self._calls.append((command, args, f))
663 self._calls.append((command, args, f))
664
664
665 return f
665 return f
666
666
667 def sendcommands(self):
667 def sendcommands(self):
668 if self._sent:
668 if self._sent:
669 return
669 return
670
670
671 if not self._calls:
671 if not self._calls:
672 return
672 return
673
673
674 self._sent = True
674 self._sent = True
675
675
676 # Unhack any future types so caller sees a clean type and so we
676 # Unhack any future types so caller sees a clean type and so we
677 # break reference cycle.
677 # break reference cycle.
678 for f in self._futures:
678 for f in self._futures:
679 if isinstance(f, queuedcommandfuture):
679 if isinstance(f, queuedcommandfuture):
680 f.__class__ = pycompat.futures.Future
680 f.__class__ = pycompat.futures.Future
681 f._peerexecutor = None
681 f._peerexecutor = None
682
682
683 # Mark the future as running and filter out cancelled futures.
683 # Mark the future as running and filter out cancelled futures.
684 calls = [(command, args, f)
684 calls = [(command, args, f)
685 for command, args, f in self._calls
685 for command, args, f in self._calls
686 if f.set_running_or_notify_cancel()]
686 if f.set_running_or_notify_cancel()]
687
687
688 # Clear out references, prevent improper object usage.
688 # Clear out references, prevent improper object usage.
689 self._calls = None
689 self._calls = None
690
690
691 if not calls:
691 if not calls:
692 return
692 return
693
693
694 permissions = set(self._neededpermissions)
694 permissions = set(self._neededpermissions)
695
695
696 if 'push' in permissions and 'pull' in permissions:
696 if 'push' in permissions and 'pull' in permissions:
697 permissions.remove('pull')
697 permissions.remove('pull')
698
698
699 if len(permissions) > 1:
699 if len(permissions) > 1:
700 raise error.RepoError(_('cannot make request requiring multiple '
700 raise error.RepoError(_('cannot make request requiring multiple '
701 'permissions: %s') %
701 'permissions: %s') %
702 _(', ').join(sorted(permissions)))
702 _(', ').join(sorted(permissions)))
703
703
704 permission = {
704 permission = {
705 'push': 'rw',
705 'push': 'rw',
706 'pull': 'ro',
706 'pull': 'ro',
707 }[permissions.pop()]
707 }[permissions.pop()]
708
708
709 handler, resp = sendv2request(
709 handler, resp = sendv2request(
710 self._ui, self._opener, self._requestbuilder, self._apiurl,
710 self._ui, self._opener, self._requestbuilder, self._apiurl,
711 permission, calls)
711 permission, calls)
712
712
713 # TODO we probably want to validate the HTTP code, media type, etc.
713 # TODO we probably want to validate the HTTP code, media type, etc.
714
714
715 self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
715 self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
716 self._responsef = self._responseexecutor.submit(self._handleresponse,
716 self._responsef = self._responseexecutor.submit(self._handleresponse,
717 handler, resp)
717 handler, resp)
718
718
719 def close(self):
719 def close(self):
720 if self._closed:
720 if self._closed:
721 return
721 return
722
722
723 self.sendcommands()
723 self.sendcommands()
724
724
725 self._closed = True
725 self._closed = True
726
726
727 if not self._responsef:
727 if not self._responsef:
728 return
728 return
729
729
730 # TODO ^C here may not result in immediate program termination.
730 # TODO ^C here may not result in immediate program termination.
731
731
732 try:
732 try:
733 self._responsef.result()
733 self._responsef.result()
734 finally:
734 finally:
735 self._responseexecutor.shutdown(wait=True)
735 self._responseexecutor.shutdown(wait=True)
736 self._responsef = None
736 self._responsef = None
737 self._responseexecutor = None
737 self._responseexecutor = None
738
738
739 # If any of our futures are still in progress, mark them as
739 # If any of our futures are still in progress, mark them as
740 # errored, otherwise a result() could wait indefinitely.
740 # errored, otherwise a result() could wait indefinitely.
741 for f in self._futures:
741 for f in self._futures:
742 if not f.done():
742 if not f.done():
743 f.set_exception(error.ResponseError(
743 f.set_exception(error.ResponseError(
744 _('unfulfilled command response')))
744 _('unfulfilled command response')))
745
745
746 self._futures = None
746 self._futures = None
747
747
748 def _handleresponse(self, handler, resp):
748 def _handleresponse(self, handler, resp):
749 # Called in a thread to read the response.
749 # Called in a thread to read the response.
750
750
751 while handler.readframe(resp):
751 while handler.readframe(resp):
752 pass
752 pass
753
753
754 # TODO implement interface for version 2 peers
754 # TODO implement interface for version 2 peers
755 @interfaceutil.implementer(repository.ipeerconnection,
755 @interfaceutil.implementer(repository.ipeerconnection,
756 repository.ipeercapabilities,
756 repository.ipeercapabilities,
757 repository.ipeerrequests)
757 repository.ipeerrequests)
758 class httpv2peer(object):
758 class httpv2peer(object):
759 def __init__(self, ui, repourl, apipath, opener, requestbuilder,
759 def __init__(self, ui, repourl, apipath, opener, requestbuilder,
760 apidescriptor):
760 apidescriptor):
761 self.ui = ui
761 self.ui = ui
762
762
763 if repourl.endswith('/'):
763 if repourl.endswith('/'):
764 repourl = repourl[:-1]
764 repourl = repourl[:-1]
765
765
766 self._url = repourl
766 self._url = repourl
767 self._apipath = apipath
767 self._apipath = apipath
768 self._apiurl = '%s/%s' % (repourl, apipath)
768 self._apiurl = '%s/%s' % (repourl, apipath)
769 self._opener = opener
769 self._opener = opener
770 self._requestbuilder = requestbuilder
770 self._requestbuilder = requestbuilder
771 self._descriptor = apidescriptor
771 self._descriptor = apidescriptor
772
772
773 # Start of ipeerconnection.
773 # Start of ipeerconnection.
774
774
775 def url(self):
775 def url(self):
776 return self._url
776 return self._url
777
777
778 def local(self):
778 def local(self):
779 return None
779 return None
780
780
781 def peer(self):
781 def peer(self):
782 return self
782 return self
783
783
784 def canpush(self):
784 def canpush(self):
785 # TODO change once implemented.
785 # TODO change once implemented.
786 return False
786 return False
787
787
788 def close(self):
788 def close(self):
789 pass
789 pass
790
790
791 # End of ipeerconnection.
791 # End of ipeerconnection.
792
792
793 # Start of ipeercapabilities.
793 # Start of ipeercapabilities.
794
794
795 def capable(self, name):
795 def capable(self, name):
796 # The capabilities used internally historically map to capabilities
796 # The capabilities used internally historically map to capabilities
797 # advertised from the "capabilities" wire protocol command. However,
797 # advertised from the "capabilities" wire protocol command. However,
798 # version 2 of that command works differently.
798 # version 2 of that command works differently.
799
799
800 # Maps to commands that are available.
800 # Maps to commands that are available.
801 if name in ('branchmap', 'getbundle', 'known', 'lookup', 'pushkey'):
801 if name in ('branchmap', 'getbundle', 'known', 'lookup', 'pushkey'):
802 return True
802 return True
803
803
804 # Other concepts.
804 # Other concepts.
805 # TODO remove exchangev2 once we have a command implemented.
805 if name in ('bundle2'):
806 if name in ('bundle2', 'exchangev2'):
807 return True
806 return True
808
807
809 # Alias command-* to presence of command of that name.
808 # Alias command-* to presence of command of that name.
810 if name.startswith('command-'):
809 if name.startswith('command-'):
811 return name[len('command-'):] in self._descriptor['commands']
810 return name[len('command-'):] in self._descriptor['commands']
812
811
813 return False
812 return False
814
813
815 def requirecap(self, name, purpose):
814 def requirecap(self, name, purpose):
816 if self.capable(name):
815 if self.capable(name):
817 return
816 return
818
817
819 raise error.CapabilityError(
818 raise error.CapabilityError(
820 _('cannot %s; client or remote repository does not support the %r '
819 _('cannot %s; client or remote repository does not support the %r '
821 'capability') % (purpose, name))
820 'capability') % (purpose, name))
822
821
823 # End of ipeercapabilities.
822 # End of ipeercapabilities.
824
823
825 def _call(self, name, **args):
824 def _call(self, name, **args):
826 with self.commandexecutor() as e:
825 with self.commandexecutor() as e:
827 return e.callcommand(name, args).result()
826 return e.callcommand(name, args).result()
828
827
829 def commandexecutor(self):
828 def commandexecutor(self):
830 return httpv2executor(self.ui, self._opener, self._requestbuilder,
829 return httpv2executor(self.ui, self._opener, self._requestbuilder,
831 self._apiurl, self._descriptor)
830 self._apiurl, self._descriptor)
832
831
833 # Registry of API service names to metadata about peers that handle it.
832 # Registry of API service names to metadata about peers that handle it.
834 #
833 #
835 # The following keys are meaningful:
834 # The following keys are meaningful:
836 #
835 #
837 # init
836 # init
838 # Callable receiving (ui, repourl, servicepath, opener, requestbuilder,
837 # Callable receiving (ui, repourl, servicepath, opener, requestbuilder,
839 # apidescriptor) to create a peer.
838 # apidescriptor) to create a peer.
840 #
839 #
841 # priority
840 # priority
842 # Integer priority for the service. If we could choose from multiple
841 # Integer priority for the service. If we could choose from multiple
843 # services, we choose the one with the highest priority.
842 # services, we choose the one with the highest priority.
844 API_PEERS = {
843 API_PEERS = {
845 wireprototypes.HTTP_WIREPROTO_V2: {
844 wireprototypes.HTTP_WIREPROTO_V2: {
846 'init': httpv2peer,
845 'init': httpv2peer,
847 'priority': 50,
846 'priority': 50,
848 },
847 },
849 }
848 }
850
849
851 def performhandshake(ui, url, opener, requestbuilder):
850 def performhandshake(ui, url, opener, requestbuilder):
852 # The handshake is a request to the capabilities command.
851 # The handshake is a request to the capabilities command.
853
852
854 caps = None
853 caps = None
855 def capable(x):
854 def capable(x):
856 raise error.ProgrammingError('should not be called')
855 raise error.ProgrammingError('should not be called')
857
856
858 args = {}
857 args = {}
859
858
860 # The client advertises support for newer protocols by adding an
859 # The client advertises support for newer protocols by adding an
861 # X-HgUpgrade-* header with a list of supported APIs and an
860 # X-HgUpgrade-* header with a list of supported APIs and an
862 # X-HgProto-* header advertising which serializing formats it supports.
861 # X-HgProto-* header advertising which serializing formats it supports.
863 # We only support the HTTP version 2 transport and CBOR responses for
862 # We only support the HTTP version 2 transport and CBOR responses for
864 # now.
863 # now.
865 advertisev2 = ui.configbool('experimental', 'httppeer.advertise-v2')
864 advertisev2 = ui.configbool('experimental', 'httppeer.advertise-v2')
866
865
867 if advertisev2:
866 if advertisev2:
868 args['headers'] = {
867 args['headers'] = {
869 r'X-HgProto-1': r'cbor',
868 r'X-HgProto-1': r'cbor',
870 }
869 }
871
870
872 args['headers'].update(
871 args['headers'].update(
873 encodevalueinheaders(' '.join(sorted(API_PEERS)),
872 encodevalueinheaders(' '.join(sorted(API_PEERS)),
874 'X-HgUpgrade',
873 'X-HgUpgrade',
875 # We don't know the header limit this early.
874 # We don't know the header limit this early.
876 # So make it small.
875 # So make it small.
877 1024))
876 1024))
878
877
879 req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
878 req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
880 capable, url, 'capabilities',
879 capable, url, 'capabilities',
881 args)
880 args)
882 resp = sendrequest(ui, opener, req)
881 resp = sendrequest(ui, opener, req)
883
882
884 # The server may redirect us to the repo root, stripping the
883 # The server may redirect us to the repo root, stripping the
885 # ?cmd=capabilities query string from the URL. The server would likely
884 # ?cmd=capabilities query string from the URL. The server would likely
886 # return HTML in this case and ``parsev1commandresponse()`` would raise.
885 # return HTML in this case and ``parsev1commandresponse()`` would raise.
887 # We catch this special case and re-issue the capabilities request against
886 # We catch this special case and re-issue the capabilities request against
888 # the new URL.
887 # the new URL.
889 #
888 #
890 # We should ideally not do this, as a redirect that drops the query
889 # We should ideally not do this, as a redirect that drops the query
891 # string from the URL is arguably a server bug. (Garbage in, garbage out).
890 # string from the URL is arguably a server bug. (Garbage in, garbage out).
892 # However, Mercurial clients for several years appeared to handle this
891 # However, Mercurial clients for several years appeared to handle this
893 # issue without behavior degradation. And according to issue 5860, it may
892 # issue without behavior degradation. And according to issue 5860, it may
894 # be a longstanding bug in some server implementations. So we allow a
893 # be a longstanding bug in some server implementations. So we allow a
895 # redirect that drops the query string to "just work."
894 # redirect that drops the query string to "just work."
896 try:
895 try:
897 respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
896 respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
898 compressible=False,
897 compressible=False,
899 allowcbor=advertisev2)
898 allowcbor=advertisev2)
900 except RedirectedRepoError as e:
899 except RedirectedRepoError as e:
901 req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
900 req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
902 capable, e.respurl,
901 capable, e.respurl,
903 'capabilities', args)
902 'capabilities', args)
904 resp = sendrequest(ui, opener, req)
903 resp = sendrequest(ui, opener, req)
905 respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
904 respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
906 compressible=False,
905 compressible=False,
907 allowcbor=advertisev2)
906 allowcbor=advertisev2)
908
907
909 try:
908 try:
910 rawdata = resp.read()
909 rawdata = resp.read()
911 finally:
910 finally:
912 resp.close()
911 resp.close()
913
912
914 if not ct.startswith('application/mercurial-'):
913 if not ct.startswith('application/mercurial-'):
915 raise error.ProgrammingError('unexpected content-type: %s' % ct)
914 raise error.ProgrammingError('unexpected content-type: %s' % ct)
916
915
917 if advertisev2:
916 if advertisev2:
918 if ct == 'application/mercurial-cbor':
917 if ct == 'application/mercurial-cbor':
919 try:
918 try:
920 info = cborutil.decodeall(rawdata)[0]
919 info = cborutil.decodeall(rawdata)[0]
921 except cborutil.CBORDecodeError:
920 except cborutil.CBORDecodeError:
922 raise error.Abort(_('error decoding CBOR from remote server'),
921 raise error.Abort(_('error decoding CBOR from remote server'),
923 hint=_('try again and consider contacting '
922 hint=_('try again and consider contacting '
924 'the server operator'))
923 'the server operator'))
925
924
926 # We got a legacy response. That's fine.
925 # We got a legacy response. That's fine.
927 elif ct in ('application/mercurial-0.1', 'application/mercurial-0.2'):
926 elif ct in ('application/mercurial-0.1', 'application/mercurial-0.2'):
928 info = {
927 info = {
929 'v1capabilities': set(rawdata.split())
928 'v1capabilities': set(rawdata.split())
930 }
929 }
931
930
932 else:
931 else:
933 raise error.RepoError(
932 raise error.RepoError(
934 _('unexpected response type from server: %s') % ct)
933 _('unexpected response type from server: %s') % ct)
935 else:
934 else:
936 info = {
935 info = {
937 'v1capabilities': set(rawdata.split())
936 'v1capabilities': set(rawdata.split())
938 }
937 }
939
938
940 return respurl, info
939 return respurl, info
941
940
942 def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
941 def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
943 """Construct an appropriate HTTP peer instance.
942 """Construct an appropriate HTTP peer instance.
944
943
945 ``opener`` is an ``url.opener`` that should be used to establish
944 ``opener`` is an ``url.opener`` that should be used to establish
946 connections, perform HTTP requests.
945 connections, perform HTTP requests.
947
946
948 ``requestbuilder`` is the type used for constructing HTTP requests.
947 ``requestbuilder`` is the type used for constructing HTTP requests.
949 It exists as an argument so extensions can override the default.
948 It exists as an argument so extensions can override the default.
950 """
949 """
951 u = util.url(path)
950 u = util.url(path)
952 if u.query or u.fragment:
951 if u.query or u.fragment:
953 raise error.Abort(_('unsupported URL component: "%s"') %
952 raise error.Abort(_('unsupported URL component: "%s"') %
954 (u.query or u.fragment))
953 (u.query or u.fragment))
955
954
956 # urllib cannot handle URLs with embedded user or passwd.
955 # urllib cannot handle URLs with embedded user or passwd.
957 url, authinfo = u.authinfo()
956 url, authinfo = u.authinfo()
958 ui.debug('using %s\n' % url)
957 ui.debug('using %s\n' % url)
959
958
960 opener = opener or urlmod.opener(ui, authinfo)
959 opener = opener or urlmod.opener(ui, authinfo)
961
960
962 respurl, info = performhandshake(ui, url, opener, requestbuilder)
961 respurl, info = performhandshake(ui, url, opener, requestbuilder)
963
962
964 # Given the intersection of APIs that both we and the server support,
963 # Given the intersection of APIs that both we and the server support,
965 # sort by their advertised priority and pick the first one.
964 # sort by their advertised priority and pick the first one.
966 #
965 #
967 # TODO consider making this request-based and interface driven. For
966 # TODO consider making this request-based and interface driven. For
968 # example, the caller could say "I want a peer that does X." It's quite
967 # example, the caller could say "I want a peer that does X." It's quite
969 # possible that not all peers would do that. Since we know the service
968 # possible that not all peers would do that. Since we know the service
970 # capabilities, we could filter out services not meeting the
969 # capabilities, we could filter out services not meeting the
971 # requirements. Possibly by consulting the interfaces defined by the
970 # requirements. Possibly by consulting the interfaces defined by the
972 # peer type.
971 # peer type.
973 apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys())
972 apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys())
974
973
975 preferredchoices = sorted(apipeerchoices,
974 preferredchoices = sorted(apipeerchoices,
976 key=lambda x: API_PEERS[x]['priority'],
975 key=lambda x: API_PEERS[x]['priority'],
977 reverse=True)
976 reverse=True)
978
977
979 for service in preferredchoices:
978 for service in preferredchoices:
980 apipath = '%s/%s' % (info['apibase'].rstrip('/'), service)
979 apipath = '%s/%s' % (info['apibase'].rstrip('/'), service)
981
980
982 return API_PEERS[service]['init'](ui, respurl, apipath, opener,
981 return API_PEERS[service]['init'](ui, respurl, apipath, opener,
983 requestbuilder,
982 requestbuilder,
984 info['apis'][service])
983 info['apis'][service])
985
984
986 # Failed to construct an API peer. Fall back to legacy.
985 # Failed to construct an API peer. Fall back to legacy.
987 return httppeer(ui, path, respurl, opener, requestbuilder,
986 return httppeer(ui, path, respurl, opener, requestbuilder,
988 info['v1capabilities'])
987 info['v1capabilities'])
989
988
990 def instance(ui, path, create, intents=None, createopts=None):
989 def instance(ui, path, create, intents=None, createopts=None):
991 if create:
990 if create:
992 raise error.Abort(_('cannot create new http repository'))
991 raise error.Abort(_('cannot create new http repository'))
993 try:
992 try:
994 if path.startswith('https:') and not urlmod.has_https:
993 if path.startswith('https:') and not urlmod.has_https:
995 raise error.Abort(_('Python support for SSL and HTTPS '
994 raise error.Abort(_('Python support for SSL and HTTPS '
996 'is not installed'))
995 'is not installed'))
997
996
998 inst = makepeer(ui, path)
997 inst = makepeer(ui, path)
999
998
1000 return inst
999 return inst
1001 except error.RepoError as httpexception:
1000 except error.RepoError as httpexception:
1002 try:
1001 try:
1003 r = statichttprepo.instance(ui, "static-" + path, create)
1002 r = statichttprepo.instance(ui, "static-" + path, create)
1004 ui.note(_('(falling back to static-http)\n'))
1003 ui.note(_('(falling back to static-http)\n'))
1005 return r
1004 return r
1006 except error.RepoError:
1005 except error.RepoError:
1007 raise httpexception # use the original http RepoError instead
1006 raise httpexception # use the original http RepoError instead
@@ -1,522 +1,601
1 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
1 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
2 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
2 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 #
3 #
4 # This software may be used and distributed according to the terms of the
4 # This software may be used and distributed according to the terms of the
5 # GNU General Public License version 2 or any later version.
5 # GNU General Public License version 2 or any later version.
6
6
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import contextlib
9 import contextlib
10
10
11 from .i18n import _
11 from .i18n import _
12 from .node import (
13 nullid,
14 )
12 from . import (
15 from . import (
16 discovery,
13 encoding,
17 encoding,
14 error,
18 error,
15 pycompat,
19 pycompat,
16 streamclone,
20 streamclone,
17 util,
21 util,
18 wireprotoframing,
22 wireprotoframing,
19 wireprototypes,
23 wireprototypes,
20 )
24 )
21 from .utils import (
25 from .utils import (
22 interfaceutil,
26 interfaceutil,
23 )
27 )
24
28
25 FRAMINGTYPE = b'application/mercurial-exp-framing-0005'
29 FRAMINGTYPE = b'application/mercurial-exp-framing-0005'
26
30
27 HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2
31 HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2
28
32
29 COMMANDS = wireprototypes.commanddict()
33 COMMANDS = wireprototypes.commanddict()
30
34
31 def handlehttpv2request(rctx, req, res, checkperm, urlparts):
35 def handlehttpv2request(rctx, req, res, checkperm, urlparts):
32 from .hgweb import common as hgwebcommon
36 from .hgweb import common as hgwebcommon
33
37
34 # URL space looks like: <permissions>/<command>, where <permission> can
38 # URL space looks like: <permissions>/<command>, where <permission> can
35 # be ``ro`` or ``rw`` to signal read-only or read-write, respectively.
39 # be ``ro`` or ``rw`` to signal read-only or read-write, respectively.
36
40
37 # Root URL does nothing meaningful... yet.
41 # Root URL does nothing meaningful... yet.
38 if not urlparts:
42 if not urlparts:
39 res.status = b'200 OK'
43 res.status = b'200 OK'
40 res.headers[b'Content-Type'] = b'text/plain'
44 res.headers[b'Content-Type'] = b'text/plain'
41 res.setbodybytes(_('HTTP version 2 API handler'))
45 res.setbodybytes(_('HTTP version 2 API handler'))
42 return
46 return
43
47
44 if len(urlparts) == 1:
48 if len(urlparts) == 1:
45 res.status = b'404 Not Found'
49 res.status = b'404 Not Found'
46 res.headers[b'Content-Type'] = b'text/plain'
50 res.headers[b'Content-Type'] = b'text/plain'
47 res.setbodybytes(_('do not know how to process %s\n') %
51 res.setbodybytes(_('do not know how to process %s\n') %
48 req.dispatchpath)
52 req.dispatchpath)
49 return
53 return
50
54
51 permission, command = urlparts[0:2]
55 permission, command = urlparts[0:2]
52
56
53 if permission not in (b'ro', b'rw'):
57 if permission not in (b'ro', b'rw'):
54 res.status = b'404 Not Found'
58 res.status = b'404 Not Found'
55 res.headers[b'Content-Type'] = b'text/plain'
59 res.headers[b'Content-Type'] = b'text/plain'
56 res.setbodybytes(_('unknown permission: %s') % permission)
60 res.setbodybytes(_('unknown permission: %s') % permission)
57 return
61 return
58
62
59 if req.method != 'POST':
63 if req.method != 'POST':
60 res.status = b'405 Method Not Allowed'
64 res.status = b'405 Method Not Allowed'
61 res.headers[b'Allow'] = b'POST'
65 res.headers[b'Allow'] = b'POST'
62 res.setbodybytes(_('commands require POST requests'))
66 res.setbodybytes(_('commands require POST requests'))
63 return
67 return
64
68
65 # At some point we'll want to use our own API instead of recycling the
69 # At some point we'll want to use our own API instead of recycling the
66 # behavior of version 1 of the wire protocol...
70 # behavior of version 1 of the wire protocol...
67 # TODO return reasonable responses - not responses that overload the
71 # TODO return reasonable responses - not responses that overload the
68 # HTTP status line message for error reporting.
72 # HTTP status line message for error reporting.
69 try:
73 try:
70 checkperm(rctx, req, 'pull' if permission == b'ro' else 'push')
74 checkperm(rctx, req, 'pull' if permission == b'ro' else 'push')
71 except hgwebcommon.ErrorResponse as e:
75 except hgwebcommon.ErrorResponse as e:
72 res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
76 res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
73 for k, v in e.headers:
77 for k, v in e.headers:
74 res.headers[k] = v
78 res.headers[k] = v
75 res.setbodybytes('permission denied')
79 res.setbodybytes('permission denied')
76 return
80 return
77
81
78 # We have a special endpoint to reflect the request back at the client.
82 # We have a special endpoint to reflect the request back at the client.
79 if command == b'debugreflect':
83 if command == b'debugreflect':
80 _processhttpv2reflectrequest(rctx.repo.ui, rctx.repo, req, res)
84 _processhttpv2reflectrequest(rctx.repo.ui, rctx.repo, req, res)
81 return
85 return
82
86
83 # Extra commands that we handle that aren't really wire protocol
87 # Extra commands that we handle that aren't really wire protocol
84 # commands. Think extra hard before making this hackery available to
88 # commands. Think extra hard before making this hackery available to
85 # extension.
89 # extension.
86 extracommands = {'multirequest'}
90 extracommands = {'multirequest'}
87
91
88 if command not in COMMANDS and command not in extracommands:
92 if command not in COMMANDS and command not in extracommands:
89 res.status = b'404 Not Found'
93 res.status = b'404 Not Found'
90 res.headers[b'Content-Type'] = b'text/plain'
94 res.headers[b'Content-Type'] = b'text/plain'
91 res.setbodybytes(_('unknown wire protocol command: %s\n') % command)
95 res.setbodybytes(_('unknown wire protocol command: %s\n') % command)
92 return
96 return
93
97
94 repo = rctx.repo
98 repo = rctx.repo
95 ui = repo.ui
99 ui = repo.ui
96
100
97 proto = httpv2protocolhandler(req, ui)
101 proto = httpv2protocolhandler(req, ui)
98
102
99 if (not COMMANDS.commandavailable(command, proto)
103 if (not COMMANDS.commandavailable(command, proto)
100 and command not in extracommands):
104 and command not in extracommands):
101 res.status = b'404 Not Found'
105 res.status = b'404 Not Found'
102 res.headers[b'Content-Type'] = b'text/plain'
106 res.headers[b'Content-Type'] = b'text/plain'
103 res.setbodybytes(_('invalid wire protocol command: %s') % command)
107 res.setbodybytes(_('invalid wire protocol command: %s') % command)
104 return
108 return
105
109
106 # TODO consider cases where proxies may add additional Accept headers.
110 # TODO consider cases where proxies may add additional Accept headers.
107 if req.headers.get(b'Accept') != FRAMINGTYPE:
111 if req.headers.get(b'Accept') != FRAMINGTYPE:
108 res.status = b'406 Not Acceptable'
112 res.status = b'406 Not Acceptable'
109 res.headers[b'Content-Type'] = b'text/plain'
113 res.headers[b'Content-Type'] = b'text/plain'
110 res.setbodybytes(_('client MUST specify Accept header with value: %s\n')
114 res.setbodybytes(_('client MUST specify Accept header with value: %s\n')
111 % FRAMINGTYPE)
115 % FRAMINGTYPE)
112 return
116 return
113
117
114 if req.headers.get(b'Content-Type') != FRAMINGTYPE:
118 if req.headers.get(b'Content-Type') != FRAMINGTYPE:
115 res.status = b'415 Unsupported Media Type'
119 res.status = b'415 Unsupported Media Type'
116 # TODO we should send a response with appropriate media type,
120 # TODO we should send a response with appropriate media type,
117 # since client does Accept it.
121 # since client does Accept it.
118 res.headers[b'Content-Type'] = b'text/plain'
122 res.headers[b'Content-Type'] = b'text/plain'
119 res.setbodybytes(_('client MUST send Content-Type header with '
123 res.setbodybytes(_('client MUST send Content-Type header with '
120 'value: %s\n') % FRAMINGTYPE)
124 'value: %s\n') % FRAMINGTYPE)
121 return
125 return
122
126
123 _processhttpv2request(ui, repo, req, res, permission, command, proto)
127 _processhttpv2request(ui, repo, req, res, permission, command, proto)
124
128
125 def _processhttpv2reflectrequest(ui, repo, req, res):
129 def _processhttpv2reflectrequest(ui, repo, req, res):
126 """Reads unified frame protocol request and dumps out state to client.
130 """Reads unified frame protocol request and dumps out state to client.
127
131
128 This special endpoint can be used to help debug the wire protocol.
132 This special endpoint can be used to help debug the wire protocol.
129
133
130 Instead of routing the request through the normal dispatch mechanism,
134 Instead of routing the request through the normal dispatch mechanism,
131 we instead read all frames, decode them, and feed them into our state
135 we instead read all frames, decode them, and feed them into our state
132 tracker. We then dump the log of all that activity back out to the
136 tracker. We then dump the log of all that activity back out to the
133 client.
137 client.
134 """
138 """
135 import json
139 import json
136
140
137 # Reflection APIs have a history of being abused, accidentally disclosing
141 # Reflection APIs have a history of being abused, accidentally disclosing
138 # sensitive data, etc. So we have a config knob.
142 # sensitive data, etc. So we have a config knob.
139 if not ui.configbool('experimental', 'web.api.debugreflect'):
143 if not ui.configbool('experimental', 'web.api.debugreflect'):
140 res.status = b'404 Not Found'
144 res.status = b'404 Not Found'
141 res.headers[b'Content-Type'] = b'text/plain'
145 res.headers[b'Content-Type'] = b'text/plain'
142 res.setbodybytes(_('debugreflect service not available'))
146 res.setbodybytes(_('debugreflect service not available'))
143 return
147 return
144
148
145 # We assume we have a unified framing protocol request body.
149 # We assume we have a unified framing protocol request body.
146
150
147 reactor = wireprotoframing.serverreactor()
151 reactor = wireprotoframing.serverreactor()
148 states = []
152 states = []
149
153
150 while True:
154 while True:
151 frame = wireprotoframing.readframe(req.bodyfh)
155 frame = wireprotoframing.readframe(req.bodyfh)
152
156
153 if not frame:
157 if not frame:
154 states.append(b'received: <no frame>')
158 states.append(b'received: <no frame>')
155 break
159 break
156
160
157 states.append(b'received: %d %d %d %s' % (frame.typeid, frame.flags,
161 states.append(b'received: %d %d %d %s' % (frame.typeid, frame.flags,
158 frame.requestid,
162 frame.requestid,
159 frame.payload))
163 frame.payload))
160
164
161 action, meta = reactor.onframerecv(frame)
165 action, meta = reactor.onframerecv(frame)
162 states.append(json.dumps((action, meta), sort_keys=True,
166 states.append(json.dumps((action, meta), sort_keys=True,
163 separators=(', ', ': ')))
167 separators=(', ', ': ')))
164
168
165 action, meta = reactor.oninputeof()
169 action, meta = reactor.oninputeof()
166 meta['action'] = action
170 meta['action'] = action
167 states.append(json.dumps(meta, sort_keys=True, separators=(', ',': ')))
171 states.append(json.dumps(meta, sort_keys=True, separators=(', ',': ')))
168
172
169 res.status = b'200 OK'
173 res.status = b'200 OK'
170 res.headers[b'Content-Type'] = b'text/plain'
174 res.headers[b'Content-Type'] = b'text/plain'
171 res.setbodybytes(b'\n'.join(states))
175 res.setbodybytes(b'\n'.join(states))
172
176
173 def _processhttpv2request(ui, repo, req, res, authedperm, reqcommand, proto):
177 def _processhttpv2request(ui, repo, req, res, authedperm, reqcommand, proto):
174 """Post-validation handler for HTTPv2 requests.
178 """Post-validation handler for HTTPv2 requests.
175
179
176 Called when the HTTP request contains unified frame-based protocol
180 Called when the HTTP request contains unified frame-based protocol
177 frames for evaluation.
181 frames for evaluation.
178 """
182 """
179 # TODO Some HTTP clients are full duplex and can receive data before
183 # TODO Some HTTP clients are full duplex and can receive data before
180 # the entire request is transmitted. Figure out a way to indicate support
184 # the entire request is transmitted. Figure out a way to indicate support
181 # for that so we can opt into full duplex mode.
185 # for that so we can opt into full duplex mode.
182 reactor = wireprotoframing.serverreactor(deferoutput=True)
186 reactor = wireprotoframing.serverreactor(deferoutput=True)
183 seencommand = False
187 seencommand = False
184
188
185 outstream = reactor.makeoutputstream()
189 outstream = reactor.makeoutputstream()
186
190
187 while True:
191 while True:
188 frame = wireprotoframing.readframe(req.bodyfh)
192 frame = wireprotoframing.readframe(req.bodyfh)
189 if not frame:
193 if not frame:
190 break
194 break
191
195
192 action, meta = reactor.onframerecv(frame)
196 action, meta = reactor.onframerecv(frame)
193
197
194 if action == 'wantframe':
198 if action == 'wantframe':
195 # Need more data before we can do anything.
199 # Need more data before we can do anything.
196 continue
200 continue
197 elif action == 'runcommand':
201 elif action == 'runcommand':
198 sentoutput = _httpv2runcommand(ui, repo, req, res, authedperm,
202 sentoutput = _httpv2runcommand(ui, repo, req, res, authedperm,
199 reqcommand, reactor, outstream,
203 reqcommand, reactor, outstream,
200 meta, issubsequent=seencommand)
204 meta, issubsequent=seencommand)
201
205
202 if sentoutput:
206 if sentoutput:
203 return
207 return
204
208
205 seencommand = True
209 seencommand = True
206
210
207 elif action == 'error':
211 elif action == 'error':
208 # TODO define proper error mechanism.
212 # TODO define proper error mechanism.
209 res.status = b'200 OK'
213 res.status = b'200 OK'
210 res.headers[b'Content-Type'] = b'text/plain'
214 res.headers[b'Content-Type'] = b'text/plain'
211 res.setbodybytes(meta['message'] + b'\n')
215 res.setbodybytes(meta['message'] + b'\n')
212 return
216 return
213 else:
217 else:
214 raise error.ProgrammingError(
218 raise error.ProgrammingError(
215 'unhandled action from frame processor: %s' % action)
219 'unhandled action from frame processor: %s' % action)
216
220
217 action, meta = reactor.oninputeof()
221 action, meta = reactor.oninputeof()
218 if action == 'sendframes':
222 if action == 'sendframes':
219 # We assume we haven't started sending the response yet. If we're
223 # We assume we haven't started sending the response yet. If we're
220 # wrong, the response type will raise an exception.
224 # wrong, the response type will raise an exception.
221 res.status = b'200 OK'
225 res.status = b'200 OK'
222 res.headers[b'Content-Type'] = FRAMINGTYPE
226 res.headers[b'Content-Type'] = FRAMINGTYPE
223 res.setbodygen(meta['framegen'])
227 res.setbodygen(meta['framegen'])
224 elif action == 'noop':
228 elif action == 'noop':
225 pass
229 pass
226 else:
230 else:
227 raise error.ProgrammingError('unhandled action from frame processor: %s'
231 raise error.ProgrammingError('unhandled action from frame processor: %s'
228 % action)
232 % action)
229
233
230 def _httpv2runcommand(ui, repo, req, res, authedperm, reqcommand, reactor,
234 def _httpv2runcommand(ui, repo, req, res, authedperm, reqcommand, reactor,
231 outstream, command, issubsequent):
235 outstream, command, issubsequent):
232 """Dispatch a wire protocol command made from HTTPv2 requests.
236 """Dispatch a wire protocol command made from HTTPv2 requests.
233
237
234 The authenticated permission (``authedperm``) along with the original
238 The authenticated permission (``authedperm``) along with the original
235 command from the URL (``reqcommand``) are passed in.
239 command from the URL (``reqcommand``) are passed in.
236 """
240 """
237 # We already validated that the session has permissions to perform the
241 # We already validated that the session has permissions to perform the
238 # actions in ``authedperm``. In the unified frame protocol, the canonical
242 # actions in ``authedperm``. In the unified frame protocol, the canonical
239 # command to run is expressed in a frame. However, the URL also requested
243 # command to run is expressed in a frame. However, the URL also requested
240 # to run a specific command. We need to be careful that the command we
244 # to run a specific command. We need to be careful that the command we
241 # run doesn't have permissions requirements greater than what was granted
245 # run doesn't have permissions requirements greater than what was granted
242 # by ``authedperm``.
246 # by ``authedperm``.
243 #
247 #
244 # Our rule for this is we only allow one command per HTTP request and
248 # Our rule for this is we only allow one command per HTTP request and
245 # that command must match the command in the URL. However, we make
249 # that command must match the command in the URL. However, we make
246 # an exception for the ``multirequest`` URL. This URL is allowed to
250 # an exception for the ``multirequest`` URL. This URL is allowed to
247 # execute multiple commands. We double check permissions of each command
251 # execute multiple commands. We double check permissions of each command
248 # as it is invoked to ensure there is no privilege escalation.
252 # as it is invoked to ensure there is no privilege escalation.
249 # TODO consider allowing multiple commands to regular command URLs
253 # TODO consider allowing multiple commands to regular command URLs
250 # iff each command is the same.
254 # iff each command is the same.
251
255
252 proto = httpv2protocolhandler(req, ui, args=command['args'])
256 proto = httpv2protocolhandler(req, ui, args=command['args'])
253
257
254 if reqcommand == b'multirequest':
258 if reqcommand == b'multirequest':
255 if not COMMANDS.commandavailable(command['command'], proto):
259 if not COMMANDS.commandavailable(command['command'], proto):
256 # TODO proper error mechanism
260 # TODO proper error mechanism
257 res.status = b'200 OK'
261 res.status = b'200 OK'
258 res.headers[b'Content-Type'] = b'text/plain'
262 res.headers[b'Content-Type'] = b'text/plain'
259 res.setbodybytes(_('wire protocol command not available: %s') %
263 res.setbodybytes(_('wire protocol command not available: %s') %
260 command['command'])
264 command['command'])
261 return True
265 return True
262
266
263 # TODO don't use assert here, since it may be elided by -O.
267 # TODO don't use assert here, since it may be elided by -O.
264 assert authedperm in (b'ro', b'rw')
268 assert authedperm in (b'ro', b'rw')
265 wirecommand = COMMANDS[command['command']]
269 wirecommand = COMMANDS[command['command']]
266 assert wirecommand.permission in ('push', 'pull')
270 assert wirecommand.permission in ('push', 'pull')
267
271
268 if authedperm == b'ro' and wirecommand.permission != 'pull':
272 if authedperm == b'ro' and wirecommand.permission != 'pull':
269 # TODO proper error mechanism
273 # TODO proper error mechanism
270 res.status = b'403 Forbidden'
274 res.status = b'403 Forbidden'
271 res.headers[b'Content-Type'] = b'text/plain'
275 res.headers[b'Content-Type'] = b'text/plain'
272 res.setbodybytes(_('insufficient permissions to execute '
276 res.setbodybytes(_('insufficient permissions to execute '
273 'command: %s') % command['command'])
277 'command: %s') % command['command'])
274 return True
278 return True
275
279
276 # TODO should we also call checkperm() here? Maybe not if we're going
280 # TODO should we also call checkperm() here? Maybe not if we're going
277 # to overhaul that API. The granted scope from the URL check should
281 # to overhaul that API. The granted scope from the URL check should
278 # be good enough.
282 # be good enough.
279
283
280 else:
284 else:
281 # Don't allow multiple commands outside of ``multirequest`` URL.
285 # Don't allow multiple commands outside of ``multirequest`` URL.
282 if issubsequent:
286 if issubsequent:
283 # TODO proper error mechanism
287 # TODO proper error mechanism
284 res.status = b'200 OK'
288 res.status = b'200 OK'
285 res.headers[b'Content-Type'] = b'text/plain'
289 res.headers[b'Content-Type'] = b'text/plain'
286 res.setbodybytes(_('multiple commands cannot be issued to this '
290 res.setbodybytes(_('multiple commands cannot be issued to this '
287 'URL'))
291 'URL'))
288 return True
292 return True
289
293
290 if reqcommand != command['command']:
294 if reqcommand != command['command']:
291 # TODO define proper error mechanism
295 # TODO define proper error mechanism
292 res.status = b'200 OK'
296 res.status = b'200 OK'
293 res.headers[b'Content-Type'] = b'text/plain'
297 res.headers[b'Content-Type'] = b'text/plain'
294 res.setbodybytes(_('command in frame must match command in URL'))
298 res.setbodybytes(_('command in frame must match command in URL'))
295 return True
299 return True
296
300
297 res.status = b'200 OK'
301 res.status = b'200 OK'
298 res.headers[b'Content-Type'] = FRAMINGTYPE
302 res.headers[b'Content-Type'] = FRAMINGTYPE
299
303
300 try:
304 try:
301 objs = dispatch(repo, proto, command['command'])
305 objs = dispatch(repo, proto, command['command'])
302
306
303 action, meta = reactor.oncommandresponsereadyobjects(
307 action, meta = reactor.oncommandresponsereadyobjects(
304 outstream, command['requestid'], objs)
308 outstream, command['requestid'], objs)
305
309
306 except Exception as e:
310 except Exception as e:
307 action, meta = reactor.onservererror(
311 action, meta = reactor.onservererror(
308 outstream, command['requestid'],
312 outstream, command['requestid'],
309 _('exception when invoking command: %s') % e)
313 _('exception when invoking command: %s') % e)
310
314
311 if action == 'sendframes':
315 if action == 'sendframes':
312 res.setbodygen(meta['framegen'])
316 res.setbodygen(meta['framegen'])
313 return True
317 return True
314 elif action == 'noop':
318 elif action == 'noop':
315 return False
319 return False
316 else:
320 else:
317 raise error.ProgrammingError('unhandled event from reactor: %s' %
321 raise error.ProgrammingError('unhandled event from reactor: %s' %
318 action)
322 action)
319
323
320 def getdispatchrepo(repo, proto, command):
324 def getdispatchrepo(repo, proto, command):
321 return repo.filtered('served')
325 return repo.filtered('served')
322
326
323 def dispatch(repo, proto, command):
327 def dispatch(repo, proto, command):
324 repo = getdispatchrepo(repo, proto, command)
328 repo = getdispatchrepo(repo, proto, command)
325
329
326 func, spec = COMMANDS[command]
330 func, spec = COMMANDS[command]
327 args = proto.getargs(spec)
331 args = proto.getargs(spec)
328
332
329 return func(repo, proto, **args)
333 return func(repo, proto, **args)
330
334
331 @interfaceutil.implementer(wireprototypes.baseprotocolhandler)
335 @interfaceutil.implementer(wireprototypes.baseprotocolhandler)
332 class httpv2protocolhandler(object):
336 class httpv2protocolhandler(object):
333 def __init__(self, req, ui, args=None):
337 def __init__(self, req, ui, args=None):
334 self._req = req
338 self._req = req
335 self._ui = ui
339 self._ui = ui
336 self._args = args
340 self._args = args
337
341
338 @property
342 @property
339 def name(self):
343 def name(self):
340 return HTTP_WIREPROTO_V2
344 return HTTP_WIREPROTO_V2
341
345
342 def getargs(self, args):
346 def getargs(self, args):
343 data = {}
347 data = {}
344 for k, typ in args.items():
348 for k, typ in args.items():
345 if k == '*':
349 if k == '*':
346 raise NotImplementedError('do not support * args')
350 raise NotImplementedError('do not support * args')
347 elif k in self._args:
351 elif k in self._args:
348 # TODO consider validating value types.
352 # TODO consider validating value types.
349 data[k] = self._args[k]
353 data[k] = self._args[k]
350
354
351 return data
355 return data
352
356
353 def getprotocaps(self):
357 def getprotocaps(self):
354 # Protocol capabilities are currently not implemented for HTTP V2.
358 # Protocol capabilities are currently not implemented for HTTP V2.
355 return set()
359 return set()
356
360
357 def getpayload(self):
361 def getpayload(self):
358 raise NotImplementedError
362 raise NotImplementedError
359
363
360 @contextlib.contextmanager
364 @contextlib.contextmanager
361 def mayberedirectstdio(self):
365 def mayberedirectstdio(self):
362 raise NotImplementedError
366 raise NotImplementedError
363
367
364 def client(self):
368 def client(self):
365 raise NotImplementedError
369 raise NotImplementedError
366
370
367 def addcapabilities(self, repo, caps):
371 def addcapabilities(self, repo, caps):
368 return caps
372 return caps
369
373
370 def checkperm(self, perm):
374 def checkperm(self, perm):
371 raise NotImplementedError
375 raise NotImplementedError
372
376
373 def httpv2apidescriptor(req, repo):
377 def httpv2apidescriptor(req, repo):
374 proto = httpv2protocolhandler(req, repo.ui)
378 proto = httpv2protocolhandler(req, repo.ui)
375
379
376 return _capabilitiesv2(repo, proto)
380 return _capabilitiesv2(repo, proto)
377
381
378 def _capabilitiesv2(repo, proto):
382 def _capabilitiesv2(repo, proto):
379 """Obtain the set of capabilities for version 2 transports.
383 """Obtain the set of capabilities for version 2 transports.
380
384
381 These capabilities are distinct from the capabilities for version 1
385 These capabilities are distinct from the capabilities for version 1
382 transports.
386 transports.
383 """
387 """
384 compression = []
388 compression = []
385 for engine in wireprototypes.supportedcompengines(repo.ui, util.SERVERROLE):
389 for engine in wireprototypes.supportedcompengines(repo.ui, util.SERVERROLE):
386 compression.append({
390 compression.append({
387 b'name': engine.wireprotosupport().name,
391 b'name': engine.wireprotosupport().name,
388 })
392 })
389
393
390 caps = {
394 caps = {
391 'commands': {},
395 'commands': {},
392 'compression': compression,
396 'compression': compression,
393 'framingmediatypes': [FRAMINGTYPE],
397 'framingmediatypes': [FRAMINGTYPE],
394 }
398 }
395
399
396 for command, entry in COMMANDS.items():
400 for command, entry in COMMANDS.items():
397 caps['commands'][command] = {
401 caps['commands'][command] = {
398 'args': entry.args,
402 'args': entry.args,
399 'permissions': [entry.permission],
403 'permissions': [entry.permission],
400 }
404 }
401
405
402 if streamclone.allowservergeneration(repo):
406 if streamclone.allowservergeneration(repo):
403 caps['rawrepoformats'] = sorted(repo.requirements &
407 caps['rawrepoformats'] = sorted(repo.requirements &
404 repo.supportedformats)
408 repo.supportedformats)
405
409
406 return proto.addcapabilities(repo, caps)
410 return proto.addcapabilities(repo, caps)
407
411
408 def wireprotocommand(name, args=None, permission='push'):
412 def wireprotocommand(name, args=None, permission='push'):
409 """Decorator to declare a wire protocol command.
413 """Decorator to declare a wire protocol command.
410
414
411 ``name`` is the name of the wire protocol command being provided.
415 ``name`` is the name of the wire protocol command being provided.
412
416
413 ``args`` is a dict of argument names to example values.
417 ``args`` is a dict of argument names to example values.
414
418
415 ``permission`` defines the permission type needed to run this command.
419 ``permission`` defines the permission type needed to run this command.
416 Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
420 Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
417 respectively. Default is to assume command requires ``push`` permissions
421 respectively. Default is to assume command requires ``push`` permissions
418 because otherwise commands not declaring their permissions could modify
422 because otherwise commands not declaring their permissions could modify
419 a repository that is supposed to be read-only.
423 a repository that is supposed to be read-only.
420
424
421 Wire protocol commands are generators of objects to be serialized and
425 Wire protocol commands are generators of objects to be serialized and
422 sent to the client.
426 sent to the client.
423
427
424 If a command raises an uncaught exception, this will be translated into
428 If a command raises an uncaught exception, this will be translated into
425 a command error.
429 a command error.
426 """
430 """
427 transports = {k for k, v in wireprototypes.TRANSPORTS.items()
431 transports = {k for k, v in wireprototypes.TRANSPORTS.items()
428 if v['version'] == 2}
432 if v['version'] == 2}
429
433
430 if permission not in ('push', 'pull'):
434 if permission not in ('push', 'pull'):
431 raise error.ProgrammingError('invalid wire protocol permission; '
435 raise error.ProgrammingError('invalid wire protocol permission; '
432 'got %s; expected "push" or "pull"' %
436 'got %s; expected "push" or "pull"' %
433 permission)
437 permission)
434
438
435 if args is None:
439 if args is None:
436 args = {}
440 args = {}
437
441
438 if not isinstance(args, dict):
442 if not isinstance(args, dict):
439 raise error.ProgrammingError('arguments for version 2 commands '
443 raise error.ProgrammingError('arguments for version 2 commands '
440 'must be declared as dicts')
444 'must be declared as dicts')
441
445
442 def register(func):
446 def register(func):
443 if name in COMMANDS:
447 if name in COMMANDS:
444 raise error.ProgrammingError('%s command already registered '
448 raise error.ProgrammingError('%s command already registered '
445 'for version 2' % name)
449 'for version 2' % name)
446
450
447 COMMANDS[name] = wireprototypes.commandentry(
451 COMMANDS[name] = wireprototypes.commandentry(
448 func, args=args, transports=transports, permission=permission)
452 func, args=args, transports=transports, permission=permission)
449
453
450 return func
454 return func
451
455
452 return register
456 return register
453
457
454 @wireprotocommand('branchmap', permission='pull')
458 @wireprotocommand('branchmap', permission='pull')
455 def branchmapv2(repo, proto):
459 def branchmapv2(repo, proto):
456 yield {encoding.fromlocal(k): v
460 yield {encoding.fromlocal(k): v
457 for k, v in repo.branchmap().iteritems()}
461 for k, v in repo.branchmap().iteritems()}
458
462
459 @wireprotocommand('capabilities', permission='pull')
463 @wireprotocommand('capabilities', permission='pull')
460 def capabilitiesv2(repo, proto):
464 def capabilitiesv2(repo, proto):
461 yield _capabilitiesv2(repo, proto)
465 yield _capabilitiesv2(repo, proto)
462
466
467 @wireprotocommand('changesetdata',
468 args={
469 'noderange': [[b'0123456...'], [b'abcdef...']],
470 'nodes': [b'0123456...'],
471 'fields': {b'parents', b'revision'},
472 },
473 permission='pull')
474 def changesetdata(repo, proto, noderange=None, nodes=None, fields=None):
475 fields = fields or set()
476
477 if noderange is None and nodes is None:
478 raise error.WireprotoCommandError(
479 'noderange or nodes must be defined')
480
481 if noderange is not None:
482 if len(noderange) != 2:
483 raise error.WireprotoCommandError(
484 'noderange must consist of 2 elements')
485
486 if not noderange[1]:
487 raise error.WireprotoCommandError(
488 'heads in noderange request cannot be empty')
489
490 cl = repo.changelog
491 hasnode = cl.hasnode
492
493 seen = set()
494 outgoing = []
495
496 if nodes is not None:
497 outgoing.extend(n for n in nodes if hasnode(n))
498 seen |= set(outgoing)
499
500 if noderange is not None:
501 if noderange[0]:
502 common = [n for n in noderange[0] if hasnode(n)]
503 else:
504 common = [nullid]
505
506 for n in discovery.outgoing(repo, common, noderange[1]).missing:
507 if n not in seen:
508 outgoing.append(n)
509 # Don't need to add to seen here because this is the final
510 # source of nodes and there should be no duplicates in this
511 # list.
512
513 seen.clear()
514
515 if outgoing:
516 repo.hook('preoutgoing', throw=True, source='serve')
517
518 yield {
519 b'totalitems': len(outgoing),
520 }
521
522 # It is already topologically sorted by revision number.
523 for node in outgoing:
524 d = {
525 b'node': node,
526 }
527
528 if b'parents' in fields:
529 d[b'parents'] = cl.parents(node)
530
531 revisiondata = None
532
533 if b'revision' in fields:
534 revisiondata = cl.revision(node, raw=True)
535 d[b'revisionsize'] = len(revisiondata)
536
537 yield d
538
539 if revisiondata is not None:
540 yield revisiondata
541
463 @wireprotocommand('heads',
542 @wireprotocommand('heads',
464 args={
543 args={
465 'publiconly': False,
544 'publiconly': False,
466 },
545 },
467 permission='pull')
546 permission='pull')
468 def headsv2(repo, proto, publiconly=False):
547 def headsv2(repo, proto, publiconly=False):
469 if publiconly:
548 if publiconly:
470 repo = repo.filtered('immutable')
549 repo = repo.filtered('immutable')
471
550
472 yield repo.heads()
551 yield repo.heads()
473
552
474 @wireprotocommand('known',
553 @wireprotocommand('known',
475 args={
554 args={
476 'nodes': [b'deadbeef'],
555 'nodes': [b'deadbeef'],
477 },
556 },
478 permission='pull')
557 permission='pull')
479 def knownv2(repo, proto, nodes=None):
558 def knownv2(repo, proto, nodes=None):
480 nodes = nodes or []
559 nodes = nodes or []
481 result = b''.join(b'1' if n else b'0' for n in repo.known(nodes))
560 result = b''.join(b'1' if n else b'0' for n in repo.known(nodes))
482 yield result
561 yield result
483
562
484 @wireprotocommand('listkeys',
563 @wireprotocommand('listkeys',
485 args={
564 args={
486 'namespace': b'ns',
565 'namespace': b'ns',
487 },
566 },
488 permission='pull')
567 permission='pull')
489 def listkeysv2(repo, proto, namespace=None):
568 def listkeysv2(repo, proto, namespace=None):
490 keys = repo.listkeys(encoding.tolocal(namespace))
569 keys = repo.listkeys(encoding.tolocal(namespace))
491 keys = {encoding.fromlocal(k): encoding.fromlocal(v)
570 keys = {encoding.fromlocal(k): encoding.fromlocal(v)
492 for k, v in keys.iteritems()}
571 for k, v in keys.iteritems()}
493
572
494 yield keys
573 yield keys
495
574
496 @wireprotocommand('lookup',
575 @wireprotocommand('lookup',
497 args={
576 args={
498 'key': b'foo',
577 'key': b'foo',
499 },
578 },
500 permission='pull')
579 permission='pull')
501 def lookupv2(repo, proto, key):
580 def lookupv2(repo, proto, key):
502 key = encoding.tolocal(key)
581 key = encoding.tolocal(key)
503
582
504 # TODO handle exception.
583 # TODO handle exception.
505 node = repo.lookup(key)
584 node = repo.lookup(key)
506
585
507 yield node
586 yield node
508
587
509 @wireprotocommand('pushkey',
588 @wireprotocommand('pushkey',
510 args={
589 args={
511 'namespace': b'ns',
590 'namespace': b'ns',
512 'key': b'key',
591 'key': b'key',
513 'old': b'old',
592 'old': b'old',
514 'new': b'new',
593 'new': b'new',
515 },
594 },
516 permission='push')
595 permission='push')
517 def pushkeyv2(repo, proto, namespace, key, old, new):
596 def pushkeyv2(repo, proto, namespace, key, old, new):
518 # TODO handle ui output redirection
597 # TODO handle ui output redirection
519 yield repo.pushkey(encoding.tolocal(namespace),
598 yield repo.pushkey(encoding.tolocal(namespace),
520 encoding.tolocal(key),
599 encoding.tolocal(key),
521 encoding.tolocal(old),
600 encoding.tolocal(old),
522 encoding.tolocal(new))
601 encoding.tolocal(new))
@@ -1,749 +1,749
1 #require no-chg
1 #require no-chg
2
2
3 $ . $TESTDIR/wireprotohelpers.sh
3 $ . $TESTDIR/wireprotohelpers.sh
4
4
5 $ cat >> $HGRCPATH << EOF
5 $ cat >> $HGRCPATH << EOF
6 > [web]
6 > [web]
7 > push_ssl = false
7 > push_ssl = false
8 > allow_push = *
8 > allow_push = *
9 > EOF
9 > EOF
10
10
11 $ hg init server
11 $ hg init server
12 $ cd server
12 $ cd server
13 $ touch a
13 $ touch a
14 $ hg -q commit -A -m initial
14 $ hg -q commit -A -m initial
15 $ cd ..
15 $ cd ..
16
16
17 $ hg serve -R server -p $HGPORT -d --pid-file hg.pid
17 $ hg serve -R server -p $HGPORT -d --pid-file hg.pid
18 $ cat hg.pid >> $DAEMON_PIDS
18 $ cat hg.pid >> $DAEMON_PIDS
19
19
20 compression formats are advertised in compression capability
20 compression formats are advertised in compression capability
21
21
22 #if zstd
22 #if zstd
23 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zstd,zlib$' > /dev/null
23 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zstd,zlib$' > /dev/null
24 #else
24 #else
25 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zlib$' > /dev/null
25 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=zlib$' > /dev/null
26 #endif
26 #endif
27
27
28 $ killdaemons.py
28 $ killdaemons.py
29
29
30 server.compressionengines can replace engines list wholesale
30 server.compressionengines can replace engines list wholesale
31
31
32 $ hg serve --config server.compressionengines=none -R server -p $HGPORT -d --pid-file hg.pid
32 $ hg serve --config server.compressionengines=none -R server -p $HGPORT -d --pid-file hg.pid
33 $ cat hg.pid > $DAEMON_PIDS
33 $ cat hg.pid > $DAEMON_PIDS
34 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none$' > /dev/null
34 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none$' > /dev/null
35
35
36 $ killdaemons.py
36 $ killdaemons.py
37
37
38 Order of engines can also change
38 Order of engines can also change
39
39
40 $ hg serve --config server.compressionengines=none,zlib -R server -p $HGPORT -d --pid-file hg.pid
40 $ hg serve --config server.compressionengines=none,zlib -R server -p $HGPORT -d --pid-file hg.pid
41 $ cat hg.pid > $DAEMON_PIDS
41 $ cat hg.pid > $DAEMON_PIDS
42 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none,zlib$' > /dev/null
42 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | tr ' ' '\n' | grep '^compression=none,zlib$' > /dev/null
43
43
44 $ killdaemons.py
44 $ killdaemons.py
45
45
46 Start a default server again
46 Start a default server again
47
47
48 $ hg serve -R server -p $HGPORT -d --pid-file hg.pid
48 $ hg serve -R server -p $HGPORT -d --pid-file hg.pid
49 $ cat hg.pid > $DAEMON_PIDS
49 $ cat hg.pid > $DAEMON_PIDS
50
50
51 Server should send application/mercurial-0.1 to clients if no Accept is used
51 Server should send application/mercurial-0.1 to clients if no Accept is used
52
52
53 $ get-with-headers.py --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
53 $ get-with-headers.py --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
54 200 Script output follows
54 200 Script output follows
55 content-type: application/mercurial-0.1
55 content-type: application/mercurial-0.1
56 date: $HTTP_DATE$
56 date: $HTTP_DATE$
57 server: testing stub value
57 server: testing stub value
58 transfer-encoding: chunked
58 transfer-encoding: chunked
59
59
60 Server should send application/mercurial-0.1 when client says it wants it
60 Server should send application/mercurial-0.1 when client says it wants it
61
61
62 $ get-with-headers.py --hgproto '0.1' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
62 $ get-with-headers.py --hgproto '0.1' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
63 200 Script output follows
63 200 Script output follows
64 content-type: application/mercurial-0.1
64 content-type: application/mercurial-0.1
65 date: $HTTP_DATE$
65 date: $HTTP_DATE$
66 server: testing stub value
66 server: testing stub value
67 transfer-encoding: chunked
67 transfer-encoding: chunked
68
68
69 Server should send application/mercurial-0.2 when client says it wants it
69 Server should send application/mercurial-0.2 when client says it wants it
70
70
71 $ get-with-headers.py --hgproto '0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
71 $ get-with-headers.py --hgproto '0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
72 200 Script output follows
72 200 Script output follows
73 content-type: application/mercurial-0.2
73 content-type: application/mercurial-0.2
74 date: $HTTP_DATE$
74 date: $HTTP_DATE$
75 server: testing stub value
75 server: testing stub value
76 transfer-encoding: chunked
76 transfer-encoding: chunked
77
77
78 $ get-with-headers.py --hgproto '0.1 0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
78 $ get-with-headers.py --hgproto '0.1 0.2' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
79 200 Script output follows
79 200 Script output follows
80 content-type: application/mercurial-0.2
80 content-type: application/mercurial-0.2
81 date: $HTTP_DATE$
81 date: $HTTP_DATE$
82 server: testing stub value
82 server: testing stub value
83 transfer-encoding: chunked
83 transfer-encoding: chunked
84
84
85 Requesting a compression format that server doesn't support results will fall back to 0.1
85 Requesting a compression format that server doesn't support results will fall back to 0.1
86
86
87 $ get-with-headers.py --hgproto '0.2 comp=aa' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
87 $ get-with-headers.py --hgproto '0.2 comp=aa' --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' -
88 200 Script output follows
88 200 Script output follows
89 content-type: application/mercurial-0.1
89 content-type: application/mercurial-0.1
90 date: $HTTP_DATE$
90 date: $HTTP_DATE$
91 server: testing stub value
91 server: testing stub value
92 transfer-encoding: chunked
92 transfer-encoding: chunked
93
93
94 #if zstd
94 #if zstd
95 zstd is used if available
95 zstd is used if available
96
96
97 $ get-with-headers.py --hgproto '0.2 comp=zstd' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
97 $ get-with-headers.py --hgproto '0.2 comp=zstd' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
98 $ f --size --hexdump --bytes 36 --sha1 resp
98 $ f --size --hexdump --bytes 36 --sha1 resp
99 resp: size=248, sha1=4d8d8f87fb82bd542ce52881fdc94f850748
99 resp: size=248, sha1=4d8d8f87fb82bd542ce52881fdc94f850748
100 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
100 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
101 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 7a 73 74 64 |t follows...zstd|
101 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 7a 73 74 64 |t follows...zstd|
102 0020: 28 b5 2f fd |(./.|
102 0020: 28 b5 2f fd |(./.|
103
103
104 #endif
104 #endif
105
105
106 application/mercurial-0.2 is not yet used on non-streaming responses
106 application/mercurial-0.2 is not yet used on non-streaming responses
107
107
108 $ get-with-headers.py --hgproto '0.2' $LOCALIP:$HGPORT '?cmd=heads' -
108 $ get-with-headers.py --hgproto '0.2' $LOCALIP:$HGPORT '?cmd=heads' -
109 200 Script output follows
109 200 Script output follows
110 content-length: 41
110 content-length: 41
111 content-type: application/mercurial-0.1
111 content-type: application/mercurial-0.1
112 date: $HTTP_DATE$
112 date: $HTTP_DATE$
113 server: testing stub value
113 server: testing stub value
114
114
115 e93700bd72895c5addab234c56d4024b487a362f
115 e93700bd72895c5addab234c56d4024b487a362f
116
116
117 Now test protocol preference usage
117 Now test protocol preference usage
118
118
119 $ killdaemons.py
119 $ killdaemons.py
120 $ hg serve --config server.compressionengines=none,zlib -R server -p $HGPORT -d --pid-file hg.pid
120 $ hg serve --config server.compressionengines=none,zlib -R server -p $HGPORT -d --pid-file hg.pid
121 $ cat hg.pid > $DAEMON_PIDS
121 $ cat hg.pid > $DAEMON_PIDS
122
122
123 No Accept will send 0.1+zlib, even though "none" is preferred b/c "none" isn't supported on 0.1
123 No Accept will send 0.1+zlib, even though "none" is preferred b/c "none" isn't supported on 0.1
124
124
125 $ get-with-headers.py --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' Content-Type
125 $ get-with-headers.py --headeronly $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' Content-Type
126 200 Script output follows
126 200 Script output follows
127 content-type: application/mercurial-0.1
127 content-type: application/mercurial-0.1
128
128
129 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
129 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
130 $ f --size --hexdump --bytes 28 --sha1 resp
130 $ f --size --hexdump --bytes 28 --sha1 resp
131 resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
131 resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
132 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
132 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
133 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 78 |t follows..x|
133 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 78 |t follows..x|
134
134
135 Explicit 0.1 will send zlib because "none" isn't supported on 0.1
135 Explicit 0.1 will send zlib because "none" isn't supported on 0.1
136
136
137 $ get-with-headers.py --hgproto '0.1' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
137 $ get-with-headers.py --hgproto '0.1' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
138 $ f --size --hexdump --bytes 28 --sha1 resp
138 $ f --size --hexdump --bytes 28 --sha1 resp
139 resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
139 resp: size=227, sha1=35a4c074da74f32f5440da3cbf04
140 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
140 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
141 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 78 |t follows..x|
141 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 78 |t follows..x|
142
142
143 0.2 with no compression will get "none" because that is server's preference
143 0.2 with no compression will get "none" because that is server's preference
144 (spec says ZL and UN are implicitly supported)
144 (spec says ZL and UN are implicitly supported)
145
145
146 $ get-with-headers.py --hgproto '0.2' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
146 $ get-with-headers.py --hgproto '0.2' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
147 $ f --size --hexdump --bytes 32 --sha1 resp
147 $ f --size --hexdump --bytes 32 --sha1 resp
148 resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
148 resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
149 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
149 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
150 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 6e 6f 6e 65 |t follows...none|
150 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 6e 6f 6e 65 |t follows...none|
151
151
152 Client receives server preference even if local order doesn't match
152 Client receives server preference even if local order doesn't match
153
153
154 $ get-with-headers.py --hgproto '0.2 comp=zlib,none' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
154 $ get-with-headers.py --hgproto '0.2 comp=zlib,none' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
155 $ f --size --hexdump --bytes 32 --sha1 resp
155 $ f --size --hexdump --bytes 32 --sha1 resp
156 resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
156 resp: size=432, sha1=ac931b412ec185a02e0e5bcff98dac83
157 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
157 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
158 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 6e 6f 6e 65 |t follows...none|
158 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 6e 6f 6e 65 |t follows...none|
159
159
160 Client receives only supported format even if not server preferred format
160 Client receives only supported format even if not server preferred format
161
161
162 $ get-with-headers.py --hgproto '0.2 comp=zlib' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
162 $ get-with-headers.py --hgproto '0.2 comp=zlib' $LOCALIP:$HGPORT '?cmd=getbundle&heads=e93700bd72895c5addab234c56d4024b487a362f&common=0000000000000000000000000000000000000000' > resp
163 $ f --size --hexdump --bytes 33 --sha1 resp
163 $ f --size --hexdump --bytes 33 --sha1 resp
164 resp: size=232, sha1=a1c727f0c9693ca15742a75c30419bc36
164 resp: size=232, sha1=a1c727f0c9693ca15742a75c30419bc36
165 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
165 0000: 32 30 30 20 53 63 72 69 70 74 20 6f 75 74 70 75 |200 Script outpu|
166 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 7a 6c 69 62 |t follows...zlib|
166 0010: 74 20 66 6f 6c 6c 6f 77 73 0a 0a 04 7a 6c 69 62 |t follows...zlib|
167 0020: 78 |x|
167 0020: 78 |x|
168
168
169 $ killdaemons.py
169 $ killdaemons.py
170 $ cd ..
170 $ cd ..
171
171
172 Test listkeys for listing namespaces
172 Test listkeys for listing namespaces
173
173
174 $ hg init empty
174 $ hg init empty
175 $ hg -R empty serve -p $HGPORT -d --pid-file hg.pid
175 $ hg -R empty serve -p $HGPORT -d --pid-file hg.pid
176 $ cat hg.pid > $DAEMON_PIDS
176 $ cat hg.pid > $DAEMON_PIDS
177
177
178 $ hg --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF
178 $ hg --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF
179 > command listkeys
179 > command listkeys
180 > namespace namespaces
180 > namespace namespaces
181 > EOF
181 > EOF
182 s> GET /?cmd=capabilities HTTP/1.1\r\n
182 s> GET /?cmd=capabilities HTTP/1.1\r\n
183 s> Accept-Encoding: identity\r\n
183 s> Accept-Encoding: identity\r\n
184 s> accept: application/mercurial-0.1\r\n
184 s> accept: application/mercurial-0.1\r\n
185 s> host: $LOCALIP:$HGPORT\r\n (glob)
185 s> host: $LOCALIP:$HGPORT\r\n (glob)
186 s> user-agent: Mercurial debugwireproto\r\n
186 s> user-agent: Mercurial debugwireproto\r\n
187 s> \r\n
187 s> \r\n
188 s> makefile('rb', None)
188 s> makefile('rb', None)
189 s> HTTP/1.1 200 Script output follows\r\n
189 s> HTTP/1.1 200 Script output follows\r\n
190 s> Server: testing stub value\r\n
190 s> Server: testing stub value\r\n
191 s> Date: $HTTP_DATE$\r\n
191 s> Date: $HTTP_DATE$\r\n
192 s> Content-Type: application/mercurial-0.1\r\n
192 s> Content-Type: application/mercurial-0.1\r\n
193 s> Content-Length: *\r\n (glob)
193 s> Content-Length: *\r\n (glob)
194 s> \r\n
194 s> \r\n
195 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
195 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
196 sending listkeys command
196 sending listkeys command
197 s> GET /?cmd=listkeys HTTP/1.1\r\n
197 s> GET /?cmd=listkeys HTTP/1.1\r\n
198 s> Accept-Encoding: identity\r\n
198 s> Accept-Encoding: identity\r\n
199 s> vary: X-HgArg-1,X-HgProto-1\r\n
199 s> vary: X-HgArg-1,X-HgProto-1\r\n
200 s> x-hgarg-1: namespace=namespaces\r\n
200 s> x-hgarg-1: namespace=namespaces\r\n
201 s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n
201 s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n
202 s> accept: application/mercurial-0.1\r\n
202 s> accept: application/mercurial-0.1\r\n
203 s> host: $LOCALIP:$HGPORT\r\n (glob)
203 s> host: $LOCALIP:$HGPORT\r\n (glob)
204 s> user-agent: Mercurial debugwireproto\r\n
204 s> user-agent: Mercurial debugwireproto\r\n
205 s> \r\n
205 s> \r\n
206 s> makefile('rb', None)
206 s> makefile('rb', None)
207 s> HTTP/1.1 200 Script output follows\r\n
207 s> HTTP/1.1 200 Script output follows\r\n
208 s> Server: testing stub value\r\n
208 s> Server: testing stub value\r\n
209 s> Date: $HTTP_DATE$\r\n
209 s> Date: $HTTP_DATE$\r\n
210 s> Content-Type: application/mercurial-0.1\r\n
210 s> Content-Type: application/mercurial-0.1\r\n
211 s> Content-Length: 30\r\n
211 s> Content-Length: 30\r\n
212 s> \r\n
212 s> \r\n
213 s> bookmarks\t\n
213 s> bookmarks\t\n
214 s> namespaces\t\n
214 s> namespaces\t\n
215 s> phases\t
215 s> phases\t
216 response: {
216 response: {
217 b'bookmarks': b'',
217 b'bookmarks': b'',
218 b'namespaces': b'',
218 b'namespaces': b'',
219 b'phases': b''
219 b'phases': b''
220 }
220 }
221
221
222 Same thing, but with "httprequest" command
222 Same thing, but with "httprequest" command
223
223
224 $ hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT << EOF
224 $ hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT << EOF
225 > httprequest GET ?cmd=listkeys
225 > httprequest GET ?cmd=listkeys
226 > user-agent: test
226 > user-agent: test
227 > x-hgarg-1: namespace=namespaces
227 > x-hgarg-1: namespace=namespaces
228 > EOF
228 > EOF
229 using raw connection to peer
229 using raw connection to peer
230 s> GET /?cmd=listkeys HTTP/1.1\r\n
230 s> GET /?cmd=listkeys HTTP/1.1\r\n
231 s> Accept-Encoding: identity\r\n
231 s> Accept-Encoding: identity\r\n
232 s> user-agent: test\r\n
232 s> user-agent: test\r\n
233 s> x-hgarg-1: namespace=namespaces\r\n
233 s> x-hgarg-1: namespace=namespaces\r\n
234 s> host: $LOCALIP:$HGPORT\r\n (glob)
234 s> host: $LOCALIP:$HGPORT\r\n (glob)
235 s> \r\n
235 s> \r\n
236 s> makefile('rb', None)
236 s> makefile('rb', None)
237 s> HTTP/1.1 200 Script output follows\r\n
237 s> HTTP/1.1 200 Script output follows\r\n
238 s> Server: testing stub value\r\n
238 s> Server: testing stub value\r\n
239 s> Date: $HTTP_DATE$\r\n
239 s> Date: $HTTP_DATE$\r\n
240 s> Content-Type: application/mercurial-0.1\r\n
240 s> Content-Type: application/mercurial-0.1\r\n
241 s> Content-Length: 30\r\n
241 s> Content-Length: 30\r\n
242 s> \r\n
242 s> \r\n
243 s> bookmarks\t\n
243 s> bookmarks\t\n
244 s> namespaces\t\n
244 s> namespaces\t\n
245 s> phases\t
245 s> phases\t
246
246
247 Client with HTTPv2 enabled advertises that and gets old capabilities response from old server
247 Client with HTTPv2 enabled advertises that and gets old capabilities response from old server
248
248
249 $ hg --config experimental.httppeer.advertise-v2=true --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF
249 $ hg --config experimental.httppeer.advertise-v2=true --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF
250 > command heads
250 > command heads
251 > EOF
251 > EOF
252 s> GET /?cmd=capabilities HTTP/1.1\r\n
252 s> GET /?cmd=capabilities HTTP/1.1\r\n
253 s> Accept-Encoding: identity\r\n
253 s> Accept-Encoding: identity\r\n
254 s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
254 s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
255 s> x-hgproto-1: cbor\r\n
255 s> x-hgproto-1: cbor\r\n
256 s> x-hgupgrade-1: exp-http-v2-0001\r\n
256 s> x-hgupgrade-1: exp-http-v2-0001\r\n
257 s> accept: application/mercurial-0.1\r\n
257 s> accept: application/mercurial-0.1\r\n
258 s> host: $LOCALIP:$HGPORT\r\n (glob)
258 s> host: $LOCALIP:$HGPORT\r\n (glob)
259 s> user-agent: Mercurial debugwireproto\r\n
259 s> user-agent: Mercurial debugwireproto\r\n
260 s> \r\n
260 s> \r\n
261 s> makefile('rb', None)
261 s> makefile('rb', None)
262 s> HTTP/1.1 200 Script output follows\r\n
262 s> HTTP/1.1 200 Script output follows\r\n
263 s> Server: testing stub value\r\n
263 s> Server: testing stub value\r\n
264 s> Date: $HTTP_DATE$\r\n
264 s> Date: $HTTP_DATE$\r\n
265 s> Content-Type: application/mercurial-0.1\r\n
265 s> Content-Type: application/mercurial-0.1\r\n
266 s> Content-Length: *\r\n (glob)
266 s> Content-Length: *\r\n (glob)
267 s> \r\n
267 s> \r\n
268 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
268 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
269 sending heads command
269 sending heads command
270 s> GET /?cmd=heads HTTP/1.1\r\n
270 s> GET /?cmd=heads HTTP/1.1\r\n
271 s> Accept-Encoding: identity\r\n
271 s> Accept-Encoding: identity\r\n
272 s> vary: X-HgProto-1\r\n
272 s> vary: X-HgProto-1\r\n
273 s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n
273 s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n
274 s> accept: application/mercurial-0.1\r\n
274 s> accept: application/mercurial-0.1\r\n
275 s> host: $LOCALIP:$HGPORT\r\n (glob)
275 s> host: $LOCALIP:$HGPORT\r\n (glob)
276 s> user-agent: Mercurial debugwireproto\r\n
276 s> user-agent: Mercurial debugwireproto\r\n
277 s> \r\n
277 s> \r\n
278 s> makefile('rb', None)
278 s> makefile('rb', None)
279 s> HTTP/1.1 200 Script output follows\r\n
279 s> HTTP/1.1 200 Script output follows\r\n
280 s> Server: testing stub value\r\n
280 s> Server: testing stub value\r\n
281 s> Date: $HTTP_DATE$\r\n
281 s> Date: $HTTP_DATE$\r\n
282 s> Content-Type: application/mercurial-0.1\r\n
282 s> Content-Type: application/mercurial-0.1\r\n
283 s> Content-Length: 41\r\n
283 s> Content-Length: 41\r\n
284 s> \r\n
284 s> \r\n
285 s> 0000000000000000000000000000000000000000\n
285 s> 0000000000000000000000000000000000000000\n
286 response: [
286 response: [
287 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
287 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
288 ]
288 ]
289
289
290 $ killdaemons.py
290 $ killdaemons.py
291 $ enablehttpv2 empty
291 $ enablehttpv2 empty
292 $ hg --config server.compressionengines=zlib -R empty serve -p $HGPORT -d --pid-file hg.pid
292 $ hg --config server.compressionengines=zlib -R empty serve -p $HGPORT -d --pid-file hg.pid
293 $ cat hg.pid > $DAEMON_PIDS
293 $ cat hg.pid > $DAEMON_PIDS
294
294
295 Client with HTTPv2 enabled automatically upgrades if the server supports it
295 Client with HTTPv2 enabled automatically upgrades if the server supports it
296
296
297 $ hg --config experimental.httppeer.advertise-v2=true --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF
297 $ hg --config experimental.httppeer.advertise-v2=true --verbose debugwireproto http://$LOCALIP:$HGPORT << EOF
298 > command heads
298 > command heads
299 > EOF
299 > EOF
300 s> GET /?cmd=capabilities HTTP/1.1\r\n
300 s> GET /?cmd=capabilities HTTP/1.1\r\n
301 s> Accept-Encoding: identity\r\n
301 s> Accept-Encoding: identity\r\n
302 s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
302 s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
303 s> x-hgproto-1: cbor\r\n
303 s> x-hgproto-1: cbor\r\n
304 s> x-hgupgrade-1: exp-http-v2-0001\r\n
304 s> x-hgupgrade-1: exp-http-v2-0001\r\n
305 s> accept: application/mercurial-0.1\r\n
305 s> accept: application/mercurial-0.1\r\n
306 s> host: $LOCALIP:$HGPORT\r\n (glob)
306 s> host: $LOCALIP:$HGPORT\r\n (glob)
307 s> user-agent: Mercurial debugwireproto\r\n
307 s> user-agent: Mercurial debugwireproto\r\n
308 s> \r\n
308 s> \r\n
309 s> makefile('rb', None)
309 s> makefile('rb', None)
310 s> HTTP/1.1 200 OK\r\n
310 s> HTTP/1.1 200 OK\r\n
311 s> Server: testing stub value\r\n
311 s> Server: testing stub value\r\n
312 s> Date: $HTTP_DATE$\r\n
312 s> Date: $HTTP_DATE$\r\n
313 s> Content-Type: application/mercurial-cbor\r\n
313 s> Content-Type: application/mercurial-cbor\r\n
314 s> Content-Length: *\r\n (glob)
314 s> Content-Length: *\r\n (glob)
315 s> \r\n
315 s> \r\n
316 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa7Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
316 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa8Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa3Ffields\xd9\x01\x02\x82GparentsHrevisionInoderange\x82\x81J0123456...\x81Iabcdef...Enodes\x81J0123456...Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
317 sending heads command
317 sending heads command
318 s> POST /api/exp-http-v2-0001/ro/heads HTTP/1.1\r\n
318 s> POST /api/exp-http-v2-0001/ro/heads HTTP/1.1\r\n
319 s> Accept-Encoding: identity\r\n
319 s> Accept-Encoding: identity\r\n
320 s> accept: application/mercurial-exp-framing-0005\r\n
320 s> accept: application/mercurial-exp-framing-0005\r\n
321 s> content-type: application/mercurial-exp-framing-0005\r\n
321 s> content-type: application/mercurial-exp-framing-0005\r\n
322 s> content-length: 20\r\n
322 s> content-length: 20\r\n
323 s> host: $LOCALIP:$HGPORT\r\n (glob)
323 s> host: $LOCALIP:$HGPORT\r\n (glob)
324 s> user-agent: Mercurial debugwireproto\r\n
324 s> user-agent: Mercurial debugwireproto\r\n
325 s> \r\n
325 s> \r\n
326 s> \x0c\x00\x00\x01\x00\x01\x01\x11\xa1DnameEheads
326 s> \x0c\x00\x00\x01\x00\x01\x01\x11\xa1DnameEheads
327 s> makefile('rb', None)
327 s> makefile('rb', None)
328 s> HTTP/1.1 200 OK\r\n
328 s> HTTP/1.1 200 OK\r\n
329 s> Server: testing stub value\r\n
329 s> Server: testing stub value\r\n
330 s> Date: $HTTP_DATE$\r\n
330 s> Date: $HTTP_DATE$\r\n
331 s> Content-Type: application/mercurial-exp-framing-0005\r\n
331 s> Content-Type: application/mercurial-exp-framing-0005\r\n
332 s> Transfer-Encoding: chunked\r\n
332 s> Transfer-Encoding: chunked\r\n
333 s> \r\n
333 s> \r\n
334 s> 13\r\n
334 s> 13\r\n
335 s> \x0b\x00\x00\x01\x00\x02\x011
335 s> \x0b\x00\x00\x01\x00\x02\x011
336 s> \xa1FstatusBok
336 s> \xa1FstatusBok
337 s> \r\n
337 s> \r\n
338 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
338 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
339 s> 1e\r\n
339 s> 1e\r\n
340 s> \x16\x00\x00\x01\x00\x02\x001
340 s> \x16\x00\x00\x01\x00\x02\x001
341 s> \x81T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
341 s> \x81T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
342 s> \r\n
342 s> \r\n
343 received frame(size=22; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
343 received frame(size=22; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
344 s> 8\r\n
344 s> 8\r\n
345 s> \x00\x00\x00\x01\x00\x02\x002
345 s> \x00\x00\x00\x01\x00\x02\x002
346 s> \r\n
346 s> \r\n
347 s> 0\r\n
347 s> 0\r\n
348 s> \r\n
348 s> \r\n
349 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
349 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
350 response: [
350 response: [
351 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
351 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
352 ]
352 ]
353
353
354 $ killdaemons.py
354 $ killdaemons.py
355
355
356 HTTP client follows HTTP redirect on handshake to new repo
356 HTTP client follows HTTP redirect on handshake to new repo
357
357
358 $ cd $TESTTMP
358 $ cd $TESTTMP
359
359
360 $ hg init redirector
360 $ hg init redirector
361 $ hg init redirected
361 $ hg init redirected
362 $ cd redirected
362 $ cd redirected
363 $ touch foo
363 $ touch foo
364 $ hg -q commit -A -m initial
364 $ hg -q commit -A -m initial
365 $ cd ..
365 $ cd ..
366
366
367 $ cat > paths.conf << EOF
367 $ cat > paths.conf << EOF
368 > [paths]
368 > [paths]
369 > / = $TESTTMP/*
369 > / = $TESTTMP/*
370 > EOF
370 > EOF
371
371
372 $ cat > redirectext.py << EOF
372 $ cat > redirectext.py << EOF
373 > from mercurial import extensions, wireprotoserver
373 > from mercurial import extensions, wireprotoserver
374 > def wrappedcallhttp(orig, repo, req, res, proto, cmd):
374 > def wrappedcallhttp(orig, repo, req, res, proto, cmd):
375 > path = req.advertisedurl[len(req.advertisedbaseurl):]
375 > path = req.advertisedurl[len(req.advertisedbaseurl):]
376 > if not path.startswith(b'/redirector'):
376 > if not path.startswith(b'/redirector'):
377 > return orig(repo, req, res, proto, cmd)
377 > return orig(repo, req, res, proto, cmd)
378 > relpath = path[len(b'/redirector'):]
378 > relpath = path[len(b'/redirector'):]
379 > res.status = b'301 Redirect'
379 > res.status = b'301 Redirect'
380 > newurl = b'%s/redirected%s' % (req.baseurl, relpath)
380 > newurl = b'%s/redirected%s' % (req.baseurl, relpath)
381 > if not repo.ui.configbool('testing', 'redirectqs', True) and b'?' in newurl:
381 > if not repo.ui.configbool('testing', 'redirectqs', True) and b'?' in newurl:
382 > newurl = newurl[0:newurl.index(b'?')]
382 > newurl = newurl[0:newurl.index(b'?')]
383 > res.headers[b'Location'] = newurl
383 > res.headers[b'Location'] = newurl
384 > res.headers[b'Content-Type'] = b'text/plain'
384 > res.headers[b'Content-Type'] = b'text/plain'
385 > res.setbodybytes(b'redirected')
385 > res.setbodybytes(b'redirected')
386 > return True
386 > return True
387 >
387 >
388 > extensions.wrapfunction(wireprotoserver, '_callhttp', wrappedcallhttp)
388 > extensions.wrapfunction(wireprotoserver, '_callhttp', wrappedcallhttp)
389 > EOF
389 > EOF
390
390
391 $ hg --config extensions.redirect=$TESTTMP/redirectext.py \
391 $ hg --config extensions.redirect=$TESTTMP/redirectext.py \
392 > --config server.compressionengines=zlib \
392 > --config server.compressionengines=zlib \
393 > serve --web-conf paths.conf --pid-file hg.pid -p $HGPORT -d
393 > serve --web-conf paths.conf --pid-file hg.pid -p $HGPORT -d
394 $ cat hg.pid > $DAEMON_PIDS
394 $ cat hg.pid > $DAEMON_PIDS
395
395
396 Verify our HTTP 301 is served properly
396 Verify our HTTP 301 is served properly
397
397
398 $ hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT << EOF
398 $ hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT << EOF
399 > httprequest GET /redirector?cmd=capabilities
399 > httprequest GET /redirector?cmd=capabilities
400 > user-agent: test
400 > user-agent: test
401 > EOF
401 > EOF
402 using raw connection to peer
402 using raw connection to peer
403 s> GET /redirector?cmd=capabilities HTTP/1.1\r\n
403 s> GET /redirector?cmd=capabilities HTTP/1.1\r\n
404 s> Accept-Encoding: identity\r\n
404 s> Accept-Encoding: identity\r\n
405 s> user-agent: test\r\n
405 s> user-agent: test\r\n
406 s> host: $LOCALIP:$HGPORT\r\n (glob)
406 s> host: $LOCALIP:$HGPORT\r\n (glob)
407 s> \r\n
407 s> \r\n
408 s> makefile('rb', None)
408 s> makefile('rb', None)
409 s> HTTP/1.1 301 Redirect\r\n
409 s> HTTP/1.1 301 Redirect\r\n
410 s> Server: testing stub value\r\n
410 s> Server: testing stub value\r\n
411 s> Date: $HTTP_DATE$\r\n
411 s> Date: $HTTP_DATE$\r\n
412 s> Location: http://$LOCALIP:$HGPORT/redirected?cmd=capabilities\r\n (glob)
412 s> Location: http://$LOCALIP:$HGPORT/redirected?cmd=capabilities\r\n (glob)
413 s> Content-Type: text/plain\r\n
413 s> Content-Type: text/plain\r\n
414 s> Content-Length: 10\r\n
414 s> Content-Length: 10\r\n
415 s> \r\n
415 s> \r\n
416 s> redirected
416 s> redirected
417 s> GET /redirected?cmd=capabilities HTTP/1.1\r\n
417 s> GET /redirected?cmd=capabilities HTTP/1.1\r\n
418 s> Accept-Encoding: identity\r\n
418 s> Accept-Encoding: identity\r\n
419 s> user-agent: test\r\n
419 s> user-agent: test\r\n
420 s> host: $LOCALIP:$HGPORT\r\n (glob)
420 s> host: $LOCALIP:$HGPORT\r\n (glob)
421 s> \r\n
421 s> \r\n
422 s> makefile('rb', None)
422 s> makefile('rb', None)
423 s> HTTP/1.1 200 Script output follows\r\n
423 s> HTTP/1.1 200 Script output follows\r\n
424 s> Server: testing stub value\r\n
424 s> Server: testing stub value\r\n
425 s> Date: $HTTP_DATE$\r\n
425 s> Date: $HTTP_DATE$\r\n
426 s> Content-Type: application/mercurial-0.1\r\n
426 s> Content-Type: application/mercurial-0.1\r\n
427 s> Content-Length: 453\r\n
427 s> Content-Length: 453\r\n
428 s> \r\n
428 s> \r\n
429 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
429 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
430
430
431 Test with the HTTP peer
431 Test with the HTTP peer
432
432
433 $ hg --verbose debugwireproto http://$LOCALIP:$HGPORT/redirector << EOF
433 $ hg --verbose debugwireproto http://$LOCALIP:$HGPORT/redirector << EOF
434 > command heads
434 > command heads
435 > EOF
435 > EOF
436 s> GET /redirector?cmd=capabilities HTTP/1.1\r\n
436 s> GET /redirector?cmd=capabilities HTTP/1.1\r\n
437 s> Accept-Encoding: identity\r\n
437 s> Accept-Encoding: identity\r\n
438 s> accept: application/mercurial-0.1\r\n
438 s> accept: application/mercurial-0.1\r\n
439 s> host: $LOCALIP:$HGPORT\r\n (glob)
439 s> host: $LOCALIP:$HGPORT\r\n (glob)
440 s> user-agent: Mercurial debugwireproto\r\n
440 s> user-agent: Mercurial debugwireproto\r\n
441 s> \r\n
441 s> \r\n
442 s> makefile('rb', None)
442 s> makefile('rb', None)
443 s> HTTP/1.1 301 Redirect\r\n
443 s> HTTP/1.1 301 Redirect\r\n
444 s> Server: testing stub value\r\n
444 s> Server: testing stub value\r\n
445 s> Date: $HTTP_DATE$\r\n
445 s> Date: $HTTP_DATE$\r\n
446 s> Location: http://$LOCALIP:$HGPORT/redirected?cmd=capabilities\r\n (glob)
446 s> Location: http://$LOCALIP:$HGPORT/redirected?cmd=capabilities\r\n (glob)
447 s> Content-Type: text/plain\r\n
447 s> Content-Type: text/plain\r\n
448 s> Content-Length: 10\r\n
448 s> Content-Length: 10\r\n
449 s> \r\n
449 s> \r\n
450 s> redirected
450 s> redirected
451 s> GET /redirected?cmd=capabilities HTTP/1.1\r\n
451 s> GET /redirected?cmd=capabilities HTTP/1.1\r\n
452 s> Accept-Encoding: identity\r\n
452 s> Accept-Encoding: identity\r\n
453 s> accept: application/mercurial-0.1\r\n
453 s> accept: application/mercurial-0.1\r\n
454 s> host: $LOCALIP:$HGPORT\r\n (glob)
454 s> host: $LOCALIP:$HGPORT\r\n (glob)
455 s> user-agent: Mercurial debugwireproto\r\n
455 s> user-agent: Mercurial debugwireproto\r\n
456 s> \r\n
456 s> \r\n
457 s> makefile('rb', None)
457 s> makefile('rb', None)
458 s> HTTP/1.1 200 Script output follows\r\n
458 s> HTTP/1.1 200 Script output follows\r\n
459 s> Server: testing stub value\r\n
459 s> Server: testing stub value\r\n
460 s> Date: $HTTP_DATE$\r\n
460 s> Date: $HTTP_DATE$\r\n
461 s> Content-Type: application/mercurial-0.1\r\n
461 s> Content-Type: application/mercurial-0.1\r\n
462 s> Content-Length: 453\r\n
462 s> Content-Length: 453\r\n
463 s> \r\n
463 s> \r\n
464 real URL is http://$LOCALIP:$HGPORT/redirected (glob)
464 real URL is http://$LOCALIP:$HGPORT/redirected (glob)
465 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
465 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
466 sending heads command
466 sending heads command
467 s> GET /redirected?cmd=heads HTTP/1.1\r\n
467 s> GET /redirected?cmd=heads HTTP/1.1\r\n
468 s> Accept-Encoding: identity\r\n
468 s> Accept-Encoding: identity\r\n
469 s> vary: X-HgProto-1\r\n
469 s> vary: X-HgProto-1\r\n
470 s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n
470 s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n
471 s> accept: application/mercurial-0.1\r\n
471 s> accept: application/mercurial-0.1\r\n
472 s> host: $LOCALIP:$HGPORT\r\n (glob)
472 s> host: $LOCALIP:$HGPORT\r\n (glob)
473 s> user-agent: Mercurial debugwireproto\r\n
473 s> user-agent: Mercurial debugwireproto\r\n
474 s> \r\n
474 s> \r\n
475 s> makefile('rb', None)
475 s> makefile('rb', None)
476 s> HTTP/1.1 200 Script output follows\r\n
476 s> HTTP/1.1 200 Script output follows\r\n
477 s> Server: testing stub value\r\n
477 s> Server: testing stub value\r\n
478 s> Date: $HTTP_DATE$\r\n
478 s> Date: $HTTP_DATE$\r\n
479 s> Content-Type: application/mercurial-0.1\r\n
479 s> Content-Type: application/mercurial-0.1\r\n
480 s> Content-Length: 41\r\n
480 s> Content-Length: 41\r\n
481 s> \r\n
481 s> \r\n
482 s> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n
482 s> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n
483 response: [
483 response: [
484 b'\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL'
484 b'\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL'
485 ]
485 ]
486
486
487 $ killdaemons.py
487 $ killdaemons.py
488
488
489 Now test a variation where we strip the query string from the redirect URL.
489 Now test a variation where we strip the query string from the redirect URL.
490 (SCM Manager apparently did this and clients would recover from it)
490 (SCM Manager apparently did this and clients would recover from it)
491
491
492 $ hg --config extensions.redirect=$TESTTMP/redirectext.py \
492 $ hg --config extensions.redirect=$TESTTMP/redirectext.py \
493 > --config server.compressionengines=zlib \
493 > --config server.compressionengines=zlib \
494 > --config testing.redirectqs=false \
494 > --config testing.redirectqs=false \
495 > serve --web-conf paths.conf --pid-file hg.pid -p $HGPORT -d
495 > serve --web-conf paths.conf --pid-file hg.pid -p $HGPORT -d
496 $ cat hg.pid > $DAEMON_PIDS
496 $ cat hg.pid > $DAEMON_PIDS
497
497
498 $ hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT << EOF
498 $ hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT << EOF
499 > httprequest GET /redirector?cmd=capabilities
499 > httprequest GET /redirector?cmd=capabilities
500 > user-agent: test
500 > user-agent: test
501 > EOF
501 > EOF
502 using raw connection to peer
502 using raw connection to peer
503 s> GET /redirector?cmd=capabilities HTTP/1.1\r\n
503 s> GET /redirector?cmd=capabilities HTTP/1.1\r\n
504 s> Accept-Encoding: identity\r\n
504 s> Accept-Encoding: identity\r\n
505 s> user-agent: test\r\n
505 s> user-agent: test\r\n
506 s> host: $LOCALIP:$HGPORT\r\n (glob)
506 s> host: $LOCALIP:$HGPORT\r\n (glob)
507 s> \r\n
507 s> \r\n
508 s> makefile('rb', None)
508 s> makefile('rb', None)
509 s> HTTP/1.1 301 Redirect\r\n
509 s> HTTP/1.1 301 Redirect\r\n
510 s> Server: testing stub value\r\n
510 s> Server: testing stub value\r\n
511 s> Date: $HTTP_DATE$\r\n
511 s> Date: $HTTP_DATE$\r\n
512 s> Location: http://$LOCALIP:$HGPORT/redirected\r\n (glob)
512 s> Location: http://$LOCALIP:$HGPORT/redirected\r\n (glob)
513 s> Content-Type: text/plain\r\n
513 s> Content-Type: text/plain\r\n
514 s> Content-Length: 10\r\n
514 s> Content-Length: 10\r\n
515 s> \r\n
515 s> \r\n
516 s> redirected
516 s> redirected
517 s> GET /redirected HTTP/1.1\r\n
517 s> GET /redirected HTTP/1.1\r\n
518 s> Accept-Encoding: identity\r\n
518 s> Accept-Encoding: identity\r\n
519 s> user-agent: test\r\n
519 s> user-agent: test\r\n
520 s> host: $LOCALIP:$HGPORT\r\n (glob)
520 s> host: $LOCALIP:$HGPORT\r\n (glob)
521 s> \r\n
521 s> \r\n
522 s> makefile('rb', None)
522 s> makefile('rb', None)
523 s> HTTP/1.1 200 Script output follows\r\n
523 s> HTTP/1.1 200 Script output follows\r\n
524 s> Server: testing stub value\r\n
524 s> Server: testing stub value\r\n
525 s> Date: $HTTP_DATE$\r\n
525 s> Date: $HTTP_DATE$\r\n
526 s> ETag: W/"*"\r\n (glob)
526 s> ETag: W/"*"\r\n (glob)
527 s> Content-Type: text/html; charset=ascii\r\n
527 s> Content-Type: text/html; charset=ascii\r\n
528 s> Transfer-Encoding: chunked\r\n
528 s> Transfer-Encoding: chunked\r\n
529 s> \r\n
529 s> \r\n
530 s> 414\r\n
530 s> 414\r\n
531 s> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n
531 s> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n
532 s> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">\n
532 s> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">\n
533 s> <head>\n
533 s> <head>\n
534 s> <link rel="icon" href="/redirected/static/hgicon.png" type="image/png" />\n
534 s> <link rel="icon" href="/redirected/static/hgicon.png" type="image/png" />\n
535 s> <meta name="robots" content="index, nofollow" />\n
535 s> <meta name="robots" content="index, nofollow" />\n
536 s> <link rel="stylesheet" href="/redirected/static/style-paper.css" type="text/css" />\n
536 s> <link rel="stylesheet" href="/redirected/static/style-paper.css" type="text/css" />\n
537 s> <script type="text/javascript" src="/redirected/static/mercurial.js"></script>\n
537 s> <script type="text/javascript" src="/redirected/static/mercurial.js"></script>\n
538 s> \n
538 s> \n
539 s> <title>redirected: log</title>\n
539 s> <title>redirected: log</title>\n
540 s> <link rel="alternate" type="application/atom+xml"\n
540 s> <link rel="alternate" type="application/atom+xml"\n
541 s> href="/redirected/atom-log" title="Atom feed for redirected" />\n
541 s> href="/redirected/atom-log" title="Atom feed for redirected" />\n
542 s> <link rel="alternate" type="application/rss+xml"\n
542 s> <link rel="alternate" type="application/rss+xml"\n
543 s> href="/redirected/rss-log" title="RSS feed for redirected" />\n
543 s> href="/redirected/rss-log" title="RSS feed for redirected" />\n
544 s> </head>\n
544 s> </head>\n
545 s> <body>\n
545 s> <body>\n
546 s> \n
546 s> \n
547 s> <div class="container">\n
547 s> <div class="container">\n
548 s> <div class="menu">\n
548 s> <div class="menu">\n
549 s> <div class="logo">\n
549 s> <div class="logo">\n
550 s> <a href="https://mercurial-scm.org/">\n
550 s> <a href="https://mercurial-scm.org/">\n
551 s> <img src="/redirected/static/hglogo.png" alt="mercurial" /></a>\n
551 s> <img src="/redirected/static/hglogo.png" alt="mercurial" /></a>\n
552 s> </div>\n
552 s> </div>\n
553 s> <ul>\n
553 s> <ul>\n
554 s> <li class="active">log</li>\n
554 s> <li class="active">log</li>\n
555 s> <li><a href="/redirected/graph/tip">graph</a></li>\n
555 s> <li><a href="/redirected/graph/tip">graph</a></li>\n
556 s> <li><a href="/redirected/tags">tags</a></li>\n
556 s> <li><a href="/redirected/tags">tags</a></li>\n
557 s> <li><a href="
557 s> <li><a href="
558 s> \r\n
558 s> \r\n
559 s> 810\r\n
559 s> 810\r\n
560 s> /redirected/bookmarks">bookmarks</a></li>\n
560 s> /redirected/bookmarks">bookmarks</a></li>\n
561 s> <li><a href="/redirected/branches">branches</a></li>\n
561 s> <li><a href="/redirected/branches">branches</a></li>\n
562 s> </ul>\n
562 s> </ul>\n
563 s> <ul>\n
563 s> <ul>\n
564 s> <li><a href="/redirected/rev/tip">changeset</a></li>\n
564 s> <li><a href="/redirected/rev/tip">changeset</a></li>\n
565 s> <li><a href="/redirected/file/tip">browse</a></li>\n
565 s> <li><a href="/redirected/file/tip">browse</a></li>\n
566 s> </ul>\n
566 s> </ul>\n
567 s> <ul>\n
567 s> <ul>\n
568 s> \n
568 s> \n
569 s> </ul>\n
569 s> </ul>\n
570 s> <ul>\n
570 s> <ul>\n
571 s> <li><a href="/redirected/help">help</a></li>\n
571 s> <li><a href="/redirected/help">help</a></li>\n
572 s> </ul>\n
572 s> </ul>\n
573 s> <div class="atom-logo">\n
573 s> <div class="atom-logo">\n
574 s> <a href="/redirected/atom-log" title="subscribe to atom feed">\n
574 s> <a href="/redirected/atom-log" title="subscribe to atom feed">\n
575 s> <img class="atom-logo" src="/redirected/static/feed-icon-14x14.png" alt="atom feed" />\n
575 s> <img class="atom-logo" src="/redirected/static/feed-icon-14x14.png" alt="atom feed" />\n
576 s> </a>\n
576 s> </a>\n
577 s> </div>\n
577 s> </div>\n
578 s> </div>\n
578 s> </div>\n
579 s> \n
579 s> \n
580 s> <div class="main">\n
580 s> <div class="main">\n
581 s> <h2 class="breadcrumb"><a href="/">Mercurial</a> &gt; <a href="/redirected">redirected</a> </h2>\n
581 s> <h2 class="breadcrumb"><a href="/">Mercurial</a> &gt; <a href="/redirected">redirected</a> </h2>\n
582 s> <h3>log</h3>\n
582 s> <h3>log</h3>\n
583 s> \n
583 s> \n
584 s> \n
584 s> \n
585 s> <form class="search" action="/redirected/log">\n
585 s> <form class="search" action="/redirected/log">\n
586 s> \n
586 s> \n
587 s> <p><input name="rev" id="search1" type="text" size="30" value="" /></p>\n
587 s> <p><input name="rev" id="search1" type="text" size="30" value="" /></p>\n
588 s> <div id="hint">Find changesets by keywords (author, files, the commit message), revision\n
588 s> <div id="hint">Find changesets by keywords (author, files, the commit message), revision\n
589 s> number or hash, or <a href="/redirected/help/revsets">revset expression</a>.</div>\n
589 s> number or hash, or <a href="/redirected/help/revsets">revset expression</a>.</div>\n
590 s> </form>\n
590 s> </form>\n
591 s> \n
591 s> \n
592 s> <div class="navigate">\n
592 s> <div class="navigate">\n
593 s> <a href="/redirected/shortlog/tip?revcount=30">less</a>\n
593 s> <a href="/redirected/shortlog/tip?revcount=30">less</a>\n
594 s> <a href="/redirected/shortlog/tip?revcount=120">more</a>\n
594 s> <a href="/redirected/shortlog/tip?revcount=120">more</a>\n
595 s> | rev 0: <a href="/redirected/shortlog/96ee1d7354c4">(0)</a> <a href="/redirected/shortlog/tip">tip</a> \n
595 s> | rev 0: <a href="/redirected/shortlog/96ee1d7354c4">(0)</a> <a href="/redirected/shortlog/tip">tip</a> \n
596 s> </div>\n
596 s> </div>\n
597 s> \n
597 s> \n
598 s> <table class="bigtable">\n
598 s> <table class="bigtable">\n
599 s> <thead>\n
599 s> <thead>\n
600 s> <tr>\n
600 s> <tr>\n
601 s> <th class="age">age</th>\n
601 s> <th class="age">age</th>\n
602 s> <th class="author">author</th>\n
602 s> <th class="author">author</th>\n
603 s> <th class="description">description</th>\n
603 s> <th class="description">description</th>\n
604 s> </tr>\n
604 s> </tr>\n
605 s> </thead>\n
605 s> </thead>\n
606 s> <tbody class="stripes2">\n
606 s> <tbody class="stripes2">\n
607 s> <tr>\n
607 s> <tr>\n
608 s> <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>\n
608 s> <td class="age">Thu, 01 Jan 1970 00:00:00 +0000</td>\n
609 s> <td class="author">test</td>\n
609 s> <td class="author">test</td>\n
610 s> <td class="description">\n
610 s> <td class="description">\n
611 s> <a href="/redirected/rev/96ee1d7354c4">initial</a>\n
611 s> <a href="/redirected/rev/96ee1d7354c4">initial</a>\n
612 s> <span class="phase">draft</span> <span class="branchhead">default</span> <span class="tag">tip</span> \n
612 s> <span class="phase">draft</span> <span class="branchhead">default</span> <span class="tag">tip</span> \n
613 s> </td>\n
613 s> </td>\n
614 s> </tr>\n
614 s> </tr>\n
615 s> \n
615 s> \n
616 s> </tbody>\n
616 s> </tbody>\n
617 s> </table>\n
617 s> </table>\n
618 s> \n
618 s> \n
619 s> <div class="navigate">\n
619 s> <div class="navigate">\n
620 s> <a href="/redirected/shortlog/tip?revcount=30">less</a>\n
620 s> <a href="/redirected/shortlog/tip?revcount=30">less</a>\n
621 s> <a href="/redirected/shortlog/tip?revcount=120">more</a>\n
621 s> <a href="/redirected/shortlog/tip?revcount=120">more</a>\n
622 s> | rev 0: <a href="/redirected/shortlog/96ee1d7354c4">(0)</a> <a href="/redirected/shortlog/tip">tip</a> \n
622 s> | rev 0: <a href="/redirected/shortlog/96ee1d7354c4">(0)</a> <a href="/redirected/shortlog/tip">tip</a> \n
623 s> </div>\n
623 s> </div>\n
624 s> \n
624 s> \n
625 s> <script type="text/javascript">\n
625 s> <script type="text/javascript">\n
626 s> ajaxScrollInit(\n
626 s> ajaxScrollInit(\n
627 s> \'/redirected/shortlog/%next%\',\n
627 s> \'/redirected/shortlog/%next%\',\n
628 s> \'\', <!-- NEXTHASH\n
628 s> \'\', <!-- NEXTHASH\n
629 s> function (htmlText) {
629 s> function (htmlText) {
630 s> \r\n
630 s> \r\n
631 s> 14a\r\n
631 s> 14a\r\n
632 s> \n
632 s> \n
633 s> var m = htmlText.match(/\'(\\w+)\', <!-- NEXTHASH/);\n
633 s> var m = htmlText.match(/\'(\\w+)\', <!-- NEXTHASH/);\n
634 s> return m ? m[1] : null;\n
634 s> return m ? m[1] : null;\n
635 s> },\n
635 s> },\n
636 s> \'.bigtable > tbody\',\n
636 s> \'.bigtable > tbody\',\n
637 s> \'<tr class="%class%">\\\n
637 s> \'<tr class="%class%">\\\n
638 s> <td colspan="3" style="text-align: center;">%text%</td>\\\n
638 s> <td colspan="3" style="text-align: center;">%text%</td>\\\n
639 s> </tr>\'\n
639 s> </tr>\'\n
640 s> );\n
640 s> );\n
641 s> </script>\n
641 s> </script>\n
642 s> \n
642 s> \n
643 s> </div>\n
643 s> </div>\n
644 s> </div>\n
644 s> </div>\n
645 s> \n
645 s> \n
646 s> \n
646 s> \n
647 s> \n
647 s> \n
648 s> </body>\n
648 s> </body>\n
649 s> </html>\n
649 s> </html>\n
650 s> \n
650 s> \n
651 s> \r\n
651 s> \r\n
652 s> 0\r\n
652 s> 0\r\n
653 s> \r\n
653 s> \r\n
654
654
655 $ hg --verbose debugwireproto http://$LOCALIP:$HGPORT/redirector << EOF
655 $ hg --verbose debugwireproto http://$LOCALIP:$HGPORT/redirector << EOF
656 > command heads
656 > command heads
657 > EOF
657 > EOF
658 s> GET /redirector?cmd=capabilities HTTP/1.1\r\n
658 s> GET /redirector?cmd=capabilities HTTP/1.1\r\n
659 s> Accept-Encoding: identity\r\n
659 s> Accept-Encoding: identity\r\n
660 s> accept: application/mercurial-0.1\r\n
660 s> accept: application/mercurial-0.1\r\n
661 s> host: $LOCALIP:$HGPORT\r\n (glob)
661 s> host: $LOCALIP:$HGPORT\r\n (glob)
662 s> user-agent: Mercurial debugwireproto\r\n
662 s> user-agent: Mercurial debugwireproto\r\n
663 s> \r\n
663 s> \r\n
664 s> makefile('rb', None)
664 s> makefile('rb', None)
665 s> HTTP/1.1 301 Redirect\r\n
665 s> HTTP/1.1 301 Redirect\r\n
666 s> Server: testing stub value\r\n
666 s> Server: testing stub value\r\n
667 s> Date: $HTTP_DATE$\r\n
667 s> Date: $HTTP_DATE$\r\n
668 s> Location: http://$LOCALIP:$HGPORT/redirected\r\n (glob)
668 s> Location: http://$LOCALIP:$HGPORT/redirected\r\n (glob)
669 s> Content-Type: text/plain\r\n
669 s> Content-Type: text/plain\r\n
670 s> Content-Length: 10\r\n
670 s> Content-Length: 10\r\n
671 s> \r\n
671 s> \r\n
672 s> redirected
672 s> redirected
673 s> GET /redirected HTTP/1.1\r\n
673 s> GET /redirected HTTP/1.1\r\n
674 s> Accept-Encoding: identity\r\n
674 s> Accept-Encoding: identity\r\n
675 s> accept: application/mercurial-0.1\r\n
675 s> accept: application/mercurial-0.1\r\n
676 s> host: $LOCALIP:$HGPORT\r\n (glob)
676 s> host: $LOCALIP:$HGPORT\r\n (glob)
677 s> user-agent: Mercurial debugwireproto\r\n
677 s> user-agent: Mercurial debugwireproto\r\n
678 s> \r\n
678 s> \r\n
679 s> makefile('rb', None)
679 s> makefile('rb', None)
680 s> HTTP/1.1 200 Script output follows\r\n
680 s> HTTP/1.1 200 Script output follows\r\n
681 s> Server: testing stub value\r\n
681 s> Server: testing stub value\r\n
682 s> Date: $HTTP_DATE$\r\n
682 s> Date: $HTTP_DATE$\r\n
683 s> ETag: W/"*"\r\n (glob)
683 s> ETag: W/"*"\r\n (glob)
684 s> Content-Type: text/html; charset=ascii\r\n
684 s> Content-Type: text/html; charset=ascii\r\n
685 s> Transfer-Encoding: chunked\r\n
685 s> Transfer-Encoding: chunked\r\n
686 s> \r\n
686 s> \r\n
687 real URL is http://$LOCALIP:$HGPORT/redirected (glob)
687 real URL is http://$LOCALIP:$HGPORT/redirected (glob)
688 s> 414\r\n
688 s> 414\r\n
689 s> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n
689 s> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n
690 s> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">\n
690 s> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">\n
691 s> <head>\n
691 s> <head>\n
692 s> <link rel="icon" href="/redirected/static/hgicon.png" type="image/png" />\n
692 s> <link rel="icon" href="/redirected/static/hgicon.png" type="image/png" />\n
693 s> <meta name="robots" content="index, nofollow" />\n
693 s> <meta name="robots" content="index, nofollow" />\n
694 s> <link rel="stylesheet" href="/redirected/static/style-paper.css" type="text/css" />\n
694 s> <link rel="stylesheet" href="/redirected/static/style-paper.css" type="text/css" />\n
695 s> <script type="text/javascript" src="/redirected/static/mercurial.js"></script>\n
695 s> <script type="text/javascript" src="/redirected/static/mercurial.js"></script>\n
696 s> \n
696 s> \n
697 s> <title>redirected: log</title>\n
697 s> <title>redirected: log</title>\n
698 s> <link rel="alternate" type="application/atom+xml"\n
698 s> <link rel="alternate" type="application/atom+xml"\n
699 s> href="/redirected/atom-log" title="Atom feed for redirected" />\n
699 s> href="/redirected/atom-log" title="Atom feed for redirected" />\n
700 s> <link rel="alternate" type="application/rss+xml"\n
700 s> <link rel="alternate" type="application/rss+xml"\n
701 s> href="/redirected/rss-log" title="RSS feed for redirected" />\n
701 s> href="/redirected/rss-log" title="RSS feed for redirected" />\n
702 s> </head>\n
702 s> </head>\n
703 s> <body>\n
703 s> <body>\n
704 s> \n
704 s> \n
705 s> <div class="container">\n
705 s> <div class="container">\n
706 s> <div class="menu">\n
706 s> <div class="menu">\n
707 s> <div class="logo">\n
707 s> <div class="logo">\n
708 s> <a href="https://mercurial-scm.org/">\n
708 s> <a href="https://mercurial-scm.org/">\n
709 s> <img src="/redirected/static/hglogo.png" alt="mercurial" /></a>\n
709 s> <img src="/redirected/static/hglogo.png" alt="mercurial" /></a>\n
710 s> </div>\n
710 s> </div>\n
711 s> <ul>\n
711 s> <ul>\n
712 s> <li class="active">log</li>\n
712 s> <li class="active">log</li>\n
713 s> <li><a href="/redirected/graph/tip">graph</a></li>\n
713 s> <li><a href="/redirected/graph/tip">graph</a></li>\n
714 s> <li><a href="/redirected/tags">tags</a
714 s> <li><a href="/redirected/tags">tags</a
715 s> GET /redirected?cmd=capabilities HTTP/1.1\r\n
715 s> GET /redirected?cmd=capabilities HTTP/1.1\r\n
716 s> Accept-Encoding: identity\r\n
716 s> Accept-Encoding: identity\r\n
717 s> accept: application/mercurial-0.1\r\n
717 s> accept: application/mercurial-0.1\r\n
718 s> host: $LOCALIP:$HGPORT\r\n (glob)
718 s> host: $LOCALIP:$HGPORT\r\n (glob)
719 s> user-agent: Mercurial debugwireproto\r\n
719 s> user-agent: Mercurial debugwireproto\r\n
720 s> \r\n
720 s> \r\n
721 s> makefile('rb', None)
721 s> makefile('rb', None)
722 s> HTTP/1.1 200 Script output follows\r\n
722 s> HTTP/1.1 200 Script output follows\r\n
723 s> Server: testing stub value\r\n
723 s> Server: testing stub value\r\n
724 s> Date: $HTTP_DATE$\r\n
724 s> Date: $HTTP_DATE$\r\n
725 s> Content-Type: application/mercurial-0.1\r\n
725 s> Content-Type: application/mercurial-0.1\r\n
726 s> Content-Length: 453\r\n
726 s> Content-Length: 453\r\n
727 s> \r\n
727 s> \r\n
728 real URL is http://$LOCALIP:$HGPORT/redirected (glob)
728 real URL is http://$LOCALIP:$HGPORT/redirected (glob)
729 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
729 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
730 sending heads command
730 sending heads command
731 s> GET /redirected?cmd=heads HTTP/1.1\r\n
731 s> GET /redirected?cmd=heads HTTP/1.1\r\n
732 s> Accept-Encoding: identity\r\n
732 s> Accept-Encoding: identity\r\n
733 s> vary: X-HgProto-1\r\n
733 s> vary: X-HgProto-1\r\n
734 s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n
734 s> x-hgproto-1: 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull\r\n
735 s> accept: application/mercurial-0.1\r\n
735 s> accept: application/mercurial-0.1\r\n
736 s> host: $LOCALIP:$HGPORT\r\n (glob)
736 s> host: $LOCALIP:$HGPORT\r\n (glob)
737 s> user-agent: Mercurial debugwireproto\r\n
737 s> user-agent: Mercurial debugwireproto\r\n
738 s> \r\n
738 s> \r\n
739 s> makefile('rb', None)
739 s> makefile('rb', None)
740 s> HTTP/1.1 200 Script output follows\r\n
740 s> HTTP/1.1 200 Script output follows\r\n
741 s> Server: testing stub value\r\n
741 s> Server: testing stub value\r\n
742 s> Date: $HTTP_DATE$\r\n
742 s> Date: $HTTP_DATE$\r\n
743 s> Content-Type: application/mercurial-0.1\r\n
743 s> Content-Type: application/mercurial-0.1\r\n
744 s> Content-Length: 41\r\n
744 s> Content-Length: 41\r\n
745 s> \r\n
745 s> \r\n
746 s> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n
746 s> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n
747 response: [
747 response: [
748 b'\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL'
748 b'\x96\xee\x1dsT\xc4\xadsr\x04vr\xc3j\x1fV\x1e:jL'
749 ]
749 ]
@@ -1,428 +1,472
1 #require no-chg
1 #require no-chg
2
2
3 $ . $TESTDIR/wireprotohelpers.sh
3 $ . $TESTDIR/wireprotohelpers.sh
4
4
5 $ hg init server
5 $ hg init server
6
6
7 zstd isn't present in plain builds. Make tests easier by removing
7 zstd isn't present in plain builds. Make tests easier by removing
8 zstd from the equation.
8 zstd from the equation.
9
9
10 $ cat >> server/.hg/hgrc << EOF
10 $ cat >> server/.hg/hgrc << EOF
11 > [server]
11 > [server]
12 > compressionengines = zlib
12 > compressionengines = zlib
13 > EOF
13 > EOF
14
14
15 $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
15 $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
16 $ cat hg.pid > $DAEMON_PIDS
16 $ cat hg.pid > $DAEMON_PIDS
17
17
18 A normal capabilities request is serviced for version 1
18 A normal capabilities request is serviced for version 1
19
19
20 $ sendhttpraw << EOF
20 $ sendhttpraw << EOF
21 > httprequest GET ?cmd=capabilities
21 > httprequest GET ?cmd=capabilities
22 > user-agent: test
22 > user-agent: test
23 > EOF
23 > EOF
24 using raw connection to peer
24 using raw connection to peer
25 s> GET /?cmd=capabilities HTTP/1.1\r\n
25 s> GET /?cmd=capabilities HTTP/1.1\r\n
26 s> Accept-Encoding: identity\r\n
26 s> Accept-Encoding: identity\r\n
27 s> user-agent: test\r\n
27 s> user-agent: test\r\n
28 s> host: $LOCALIP:$HGPORT\r\n (glob)
28 s> host: $LOCALIP:$HGPORT\r\n (glob)
29 s> \r\n
29 s> \r\n
30 s> makefile('rb', None)
30 s> makefile('rb', None)
31 s> HTTP/1.1 200 Script output follows\r\n
31 s> HTTP/1.1 200 Script output follows\r\n
32 s> Server: testing stub value\r\n
32 s> Server: testing stub value\r\n
33 s> Date: $HTTP_DATE$\r\n
33 s> Date: $HTTP_DATE$\r\n
34 s> Content-Type: application/mercurial-0.1\r\n
34 s> Content-Type: application/mercurial-0.1\r\n
35 s> Content-Length: *\r\n (glob)
35 s> Content-Length: *\r\n (glob)
36 s> \r\n
36 s> \r\n
37 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
37 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
38
38
39 A proper request without the API server enabled returns the legacy response
39 A proper request without the API server enabled returns the legacy response
40
40
41 $ sendhttpraw << EOF
41 $ sendhttpraw << EOF
42 > httprequest GET ?cmd=capabilities
42 > httprequest GET ?cmd=capabilities
43 > user-agent: test
43 > user-agent: test
44 > x-hgupgrade-1: foo
44 > x-hgupgrade-1: foo
45 > x-hgproto-1: cbor
45 > x-hgproto-1: cbor
46 > EOF
46 > EOF
47 using raw connection to peer
47 using raw connection to peer
48 s> GET /?cmd=capabilities HTTP/1.1\r\n
48 s> GET /?cmd=capabilities HTTP/1.1\r\n
49 s> Accept-Encoding: identity\r\n
49 s> Accept-Encoding: identity\r\n
50 s> user-agent: test\r\n
50 s> user-agent: test\r\n
51 s> x-hgproto-1: cbor\r\n
51 s> x-hgproto-1: cbor\r\n
52 s> x-hgupgrade-1: foo\r\n
52 s> x-hgupgrade-1: foo\r\n
53 s> host: $LOCALIP:$HGPORT\r\n (glob)
53 s> host: $LOCALIP:$HGPORT\r\n (glob)
54 s> \r\n
54 s> \r\n
55 s> makefile('rb', None)
55 s> makefile('rb', None)
56 s> HTTP/1.1 200 Script output follows\r\n
56 s> HTTP/1.1 200 Script output follows\r\n
57 s> Server: testing stub value\r\n
57 s> Server: testing stub value\r\n
58 s> Date: $HTTP_DATE$\r\n
58 s> Date: $HTTP_DATE$\r\n
59 s> Content-Type: application/mercurial-0.1\r\n
59 s> Content-Type: application/mercurial-0.1\r\n
60 s> Content-Length: *\r\n (glob)
60 s> Content-Length: *\r\n (glob)
61 s> \r\n
61 s> \r\n
62 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
62 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
63
63
64 Restart with just API server enabled. This enables serving the new format.
64 Restart with just API server enabled. This enables serving the new format.
65
65
66 $ killdaemons.py
66 $ killdaemons.py
67 $ cat error.log
67 $ cat error.log
68
68
69 $ cat >> server/.hg/hgrc << EOF
69 $ cat >> server/.hg/hgrc << EOF
70 > [experimental]
70 > [experimental]
71 > web.apiserver = true
71 > web.apiserver = true
72 > EOF
72 > EOF
73
73
74 $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
74 $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
75 $ cat hg.pid > $DAEMON_PIDS
75 $ cat hg.pid > $DAEMON_PIDS
76
76
77 X-HgUpgrade-<N> without CBOR advertisement uses legacy response
77 X-HgUpgrade-<N> without CBOR advertisement uses legacy response
78
78
79 $ sendhttpraw << EOF
79 $ sendhttpraw << EOF
80 > httprequest GET ?cmd=capabilities
80 > httprequest GET ?cmd=capabilities
81 > user-agent: test
81 > user-agent: test
82 > x-hgupgrade-1: foo bar
82 > x-hgupgrade-1: foo bar
83 > EOF
83 > EOF
84 using raw connection to peer
84 using raw connection to peer
85 s> GET /?cmd=capabilities HTTP/1.1\r\n
85 s> GET /?cmd=capabilities HTTP/1.1\r\n
86 s> Accept-Encoding: identity\r\n
86 s> Accept-Encoding: identity\r\n
87 s> user-agent: test\r\n
87 s> user-agent: test\r\n
88 s> x-hgupgrade-1: foo bar\r\n
88 s> x-hgupgrade-1: foo bar\r\n
89 s> host: $LOCALIP:$HGPORT\r\n (glob)
89 s> host: $LOCALIP:$HGPORT\r\n (glob)
90 s> \r\n
90 s> \r\n
91 s> makefile('rb', None)
91 s> makefile('rb', None)
92 s> HTTP/1.1 200 Script output follows\r\n
92 s> HTTP/1.1 200 Script output follows\r\n
93 s> Server: testing stub value\r\n
93 s> Server: testing stub value\r\n
94 s> Date: $HTTP_DATE$\r\n
94 s> Date: $HTTP_DATE$\r\n
95 s> Content-Type: application/mercurial-0.1\r\n
95 s> Content-Type: application/mercurial-0.1\r\n
96 s> Content-Length: *\r\n (glob)
96 s> Content-Length: *\r\n (glob)
97 s> \r\n
97 s> \r\n
98 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
98 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
99
99
100 X-HgUpgrade-<N> without known serialization in X-HgProto-<N> uses legacy response
100 X-HgUpgrade-<N> without known serialization in X-HgProto-<N> uses legacy response
101
101
102 $ sendhttpraw << EOF
102 $ sendhttpraw << EOF
103 > httprequest GET ?cmd=capabilities
103 > httprequest GET ?cmd=capabilities
104 > user-agent: test
104 > user-agent: test
105 > x-hgupgrade-1: foo bar
105 > x-hgupgrade-1: foo bar
106 > x-hgproto-1: some value
106 > x-hgproto-1: some value
107 > EOF
107 > EOF
108 using raw connection to peer
108 using raw connection to peer
109 s> GET /?cmd=capabilities HTTP/1.1\r\n
109 s> GET /?cmd=capabilities HTTP/1.1\r\n
110 s> Accept-Encoding: identity\r\n
110 s> Accept-Encoding: identity\r\n
111 s> user-agent: test\r\n
111 s> user-agent: test\r\n
112 s> x-hgproto-1: some value\r\n
112 s> x-hgproto-1: some value\r\n
113 s> x-hgupgrade-1: foo bar\r\n
113 s> x-hgupgrade-1: foo bar\r\n
114 s> host: $LOCALIP:$HGPORT\r\n (glob)
114 s> host: $LOCALIP:$HGPORT\r\n (glob)
115 s> \r\n
115 s> \r\n
116 s> makefile('rb', None)
116 s> makefile('rb', None)
117 s> HTTP/1.1 200 Script output follows\r\n
117 s> HTTP/1.1 200 Script output follows\r\n
118 s> Server: testing stub value\r\n
118 s> Server: testing stub value\r\n
119 s> Date: $HTTP_DATE$\r\n
119 s> Date: $HTTP_DATE$\r\n
120 s> Content-Type: application/mercurial-0.1\r\n
120 s> Content-Type: application/mercurial-0.1\r\n
121 s> Content-Length: *\r\n (glob)
121 s> Content-Length: *\r\n (glob)
122 s> \r\n
122 s> \r\n
123 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
123 s> batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
124
124
125 X-HgUpgrade-<N> + X-HgProto-<N> headers trigger new response format
125 X-HgUpgrade-<N> + X-HgProto-<N> headers trigger new response format
126
126
127 $ sendhttpraw << EOF
127 $ sendhttpraw << EOF
128 > httprequest GET ?cmd=capabilities
128 > httprequest GET ?cmd=capabilities
129 > user-agent: test
129 > user-agent: test
130 > x-hgupgrade-1: foo bar
130 > x-hgupgrade-1: foo bar
131 > x-hgproto-1: cbor
131 > x-hgproto-1: cbor
132 > EOF
132 > EOF
133 using raw connection to peer
133 using raw connection to peer
134 s> GET /?cmd=capabilities HTTP/1.1\r\n
134 s> GET /?cmd=capabilities HTTP/1.1\r\n
135 s> Accept-Encoding: identity\r\n
135 s> Accept-Encoding: identity\r\n
136 s> user-agent: test\r\n
136 s> user-agent: test\r\n
137 s> x-hgproto-1: cbor\r\n
137 s> x-hgproto-1: cbor\r\n
138 s> x-hgupgrade-1: foo bar\r\n
138 s> x-hgupgrade-1: foo bar\r\n
139 s> host: $LOCALIP:$HGPORT\r\n (glob)
139 s> host: $LOCALIP:$HGPORT\r\n (glob)
140 s> \r\n
140 s> \r\n
141 s> makefile('rb', None)
141 s> makefile('rb', None)
142 s> HTTP/1.1 200 OK\r\n
142 s> HTTP/1.1 200 OK\r\n
143 s> Server: testing stub value\r\n
143 s> Server: testing stub value\r\n
144 s> Date: $HTTP_DATE$\r\n
144 s> Date: $HTTP_DATE$\r\n
145 s> Content-Type: application/mercurial-cbor\r\n
145 s> Content-Type: application/mercurial-cbor\r\n
146 s> Content-Length: *\r\n (glob)
146 s> Content-Length: *\r\n (glob)
147 s> \r\n
147 s> \r\n
148 s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
148 s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
149 cbor> {
149 cbor> {
150 b'apibase': b'api/',
150 b'apibase': b'api/',
151 b'apis': {},
151 b'apis': {},
152 b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
152 b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
153 }
153 }
154
154
155 Restart server to enable HTTPv2
155 Restart server to enable HTTPv2
156
156
157 $ killdaemons.py
157 $ killdaemons.py
158 $ enablehttpv2 server
158 $ enablehttpv2 server
159 $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
159 $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
160 $ cat hg.pid > $DAEMON_PIDS
160 $ cat hg.pid > $DAEMON_PIDS
161
161
162 Only requested API services are returned
162 Only requested API services are returned
163
163
164 $ sendhttpraw << EOF
164 $ sendhttpraw << EOF
165 > httprequest GET ?cmd=capabilities
165 > httprequest GET ?cmd=capabilities
166 > user-agent: test
166 > user-agent: test
167 > x-hgupgrade-1: foo bar
167 > x-hgupgrade-1: foo bar
168 > x-hgproto-1: cbor
168 > x-hgproto-1: cbor
169 > EOF
169 > EOF
170 using raw connection to peer
170 using raw connection to peer
171 s> GET /?cmd=capabilities HTTP/1.1\r\n
171 s> GET /?cmd=capabilities HTTP/1.1\r\n
172 s> Accept-Encoding: identity\r\n
172 s> Accept-Encoding: identity\r\n
173 s> user-agent: test\r\n
173 s> user-agent: test\r\n
174 s> x-hgproto-1: cbor\r\n
174 s> x-hgproto-1: cbor\r\n
175 s> x-hgupgrade-1: foo bar\r\n
175 s> x-hgupgrade-1: foo bar\r\n
176 s> host: $LOCALIP:$HGPORT\r\n (glob)
176 s> host: $LOCALIP:$HGPORT\r\n (glob)
177 s> \r\n
177 s> \r\n
178 s> makefile('rb', None)
178 s> makefile('rb', None)
179 s> HTTP/1.1 200 OK\r\n
179 s> HTTP/1.1 200 OK\r\n
180 s> Server: testing stub value\r\n
180 s> Server: testing stub value\r\n
181 s> Date: $HTTP_DATE$\r\n
181 s> Date: $HTTP_DATE$\r\n
182 s> Content-Type: application/mercurial-cbor\r\n
182 s> Content-Type: application/mercurial-cbor\r\n
183 s> Content-Length: *\r\n (glob)
183 s> Content-Length: *\r\n (glob)
184 s> \r\n
184 s> \r\n
185 s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
185 s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
186 cbor> {
186 cbor> {
187 b'apibase': b'api/',
187 b'apibase': b'api/',
188 b'apis': {},
188 b'apis': {},
189 b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
189 b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
190 }
190 }
191
191
192 Request for HTTPv2 service returns information about it
192 Request for HTTPv2 service returns information about it
193
193
194 $ sendhttpraw << EOF
194 $ sendhttpraw << EOF
195 > httprequest GET ?cmd=capabilities
195 > httprequest GET ?cmd=capabilities
196 > user-agent: test
196 > user-agent: test
197 > x-hgupgrade-1: exp-http-v2-0001 foo bar
197 > x-hgupgrade-1: exp-http-v2-0001 foo bar
198 > x-hgproto-1: cbor
198 > x-hgproto-1: cbor
199 > EOF
199 > EOF
200 using raw connection to peer
200 using raw connection to peer
201 s> GET /?cmd=capabilities HTTP/1.1\r\n
201 s> GET /?cmd=capabilities HTTP/1.1\r\n
202 s> Accept-Encoding: identity\r\n
202 s> Accept-Encoding: identity\r\n
203 s> user-agent: test\r\n
203 s> user-agent: test\r\n
204 s> x-hgproto-1: cbor\r\n
204 s> x-hgproto-1: cbor\r\n
205 s> x-hgupgrade-1: exp-http-v2-0001 foo bar\r\n
205 s> x-hgupgrade-1: exp-http-v2-0001 foo bar\r\n
206 s> host: $LOCALIP:$HGPORT\r\n (glob)
206 s> host: $LOCALIP:$HGPORT\r\n (glob)
207 s> \r\n
207 s> \r\n
208 s> makefile('rb', None)
208 s> makefile('rb', None)
209 s> HTTP/1.1 200 OK\r\n
209 s> HTTP/1.1 200 OK\r\n
210 s> Server: testing stub value\r\n
210 s> Server: testing stub value\r\n
211 s> Date: $HTTP_DATE$\r\n
211 s> Date: $HTTP_DATE$\r\n
212 s> Content-Type: application/mercurial-cbor\r\n
212 s> Content-Type: application/mercurial-cbor\r\n
213 s> Content-Length: *\r\n (glob)
213 s> Content-Length: *\r\n (glob)
214 s> \r\n
214 s> \r\n
215 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa7Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
215 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa8Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa3Ffields\xd9\x01\x02\x82GparentsHrevisionInoderange\x82\x81J0123456...\x81Iabcdef...Enodes\x81J0123456...Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
216 cbor> {
216 cbor> {
217 b'apibase': b'api/',
217 b'apibase': b'api/',
218 b'apis': {
218 b'apis': {
219 b'exp-http-v2-0001': {
219 b'exp-http-v2-0001': {
220 b'commands': {
220 b'commands': {
221 b'branchmap': {
221 b'branchmap': {
222 b'args': {},
222 b'args': {},
223 b'permissions': [
223 b'permissions': [
224 b'pull'
224 b'pull'
225 ]
225 ]
226 },
226 },
227 b'capabilities': {
227 b'capabilities': {
228 b'args': {},
228 b'args': {},
229 b'permissions': [
229 b'permissions': [
230 b'pull'
230 b'pull'
231 ]
231 ]
232 },
232 },
233 b'changesetdata': {
234 b'args': {
235 b'fields': set([
236 b'parents',
237 b'revision'
238 ]),
239 b'noderange': [
240 [
241 b'0123456...'
242 ],
243 [
244 b'abcdef...'
245 ]
246 ],
247 b'nodes': [
248 b'0123456...'
249 ]
250 },
251 b'permissions': [
252 b'pull'
253 ]
254 },
233 b'heads': {
255 b'heads': {
234 b'args': {
256 b'args': {
235 b'publiconly': False
257 b'publiconly': False
236 },
258 },
237 b'permissions': [
259 b'permissions': [
238 b'pull'
260 b'pull'
239 ]
261 ]
240 },
262 },
241 b'known': {
263 b'known': {
242 b'args': {
264 b'args': {
243 b'nodes': [
265 b'nodes': [
244 b'deadbeef'
266 b'deadbeef'
245 ]
267 ]
246 },
268 },
247 b'permissions': [
269 b'permissions': [
248 b'pull'
270 b'pull'
249 ]
271 ]
250 },
272 },
251 b'listkeys': {
273 b'listkeys': {
252 b'args': {
274 b'args': {
253 b'namespace': b'ns'
275 b'namespace': b'ns'
254 },
276 },
255 b'permissions': [
277 b'permissions': [
256 b'pull'
278 b'pull'
257 ]
279 ]
258 },
280 },
259 b'lookup': {
281 b'lookup': {
260 b'args': {
282 b'args': {
261 b'key': b'foo'
283 b'key': b'foo'
262 },
284 },
263 b'permissions': [
285 b'permissions': [
264 b'pull'
286 b'pull'
265 ]
287 ]
266 },
288 },
267 b'pushkey': {
289 b'pushkey': {
268 b'args': {
290 b'args': {
269 b'key': b'key',
291 b'key': b'key',
270 b'namespace': b'ns',
292 b'namespace': b'ns',
271 b'new': b'new',
293 b'new': b'new',
272 b'old': b'old'
294 b'old': b'old'
273 },
295 },
274 b'permissions': [
296 b'permissions': [
275 b'push'
297 b'push'
276 ]
298 ]
277 }
299 }
278 },
300 },
279 b'compression': [
301 b'compression': [
280 {
302 {
281 b'name': b'zlib'
303 b'name': b'zlib'
282 }
304 }
283 ],
305 ],
284 b'framingmediatypes': [
306 b'framingmediatypes': [
285 b'application/mercurial-exp-framing-0005'
307 b'application/mercurial-exp-framing-0005'
286 ],
308 ],
287 b'rawrepoformats': [
309 b'rawrepoformats': [
288 b'generaldelta',
310 b'generaldelta',
289 b'revlogv1'
311 b'revlogv1'
290 ]
312 ]
291 }
313 }
292 },
314 },
293 b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
315 b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
294 }
316 }
295
317
296 capabilities command returns expected info
318 capabilities command returns expected info
297
319
298 $ sendhttpv2peerhandshake << EOF
320 $ sendhttpv2peerhandshake << EOF
299 > command capabilities
321 > command capabilities
300 > EOF
322 > EOF
301 creating http peer for wire protocol version 2
323 creating http peer for wire protocol version 2
302 s> GET /?cmd=capabilities HTTP/1.1\r\n
324 s> GET /?cmd=capabilities HTTP/1.1\r\n
303 s> Accept-Encoding: identity\r\n
325 s> Accept-Encoding: identity\r\n
304 s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
326 s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
305 s> x-hgproto-1: cbor\r\n
327 s> x-hgproto-1: cbor\r\n
306 s> x-hgupgrade-1: exp-http-v2-0001\r\n
328 s> x-hgupgrade-1: exp-http-v2-0001\r\n
307 s> accept: application/mercurial-0.1\r\n
329 s> accept: application/mercurial-0.1\r\n
308 s> host: $LOCALIP:$HGPORT\r\n (glob)
330 s> host: $LOCALIP:$HGPORT\r\n (glob)
309 s> user-agent: Mercurial debugwireproto\r\n
331 s> user-agent: Mercurial debugwireproto\r\n
310 s> \r\n
332 s> \r\n
311 s> makefile('rb', None)
333 s> makefile('rb', None)
312 s> HTTP/1.1 200 OK\r\n
334 s> HTTP/1.1 200 OK\r\n
313 s> Server: testing stub value\r\n
335 s> Server: testing stub value\r\n
314 s> Date: $HTTP_DATE$\r\n
336 s> Date: $HTTP_DATE$\r\n
315 s> Content-Type: application/mercurial-cbor\r\n
337 s> Content-Type: application/mercurial-cbor\r\n
316 s> Content-Length: *\r\n (glob)
338 s> Content-Length: *\r\n (glob)
317 s> \r\n
339 s> \r\n
318 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa7Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
340 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0001\xa4Hcommands\xa8Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa3Ffields\xd9\x01\x02\x82GparentsHrevisionInoderange\x82\x81J0123456...\x81Iabcdef...Enodes\x81J0123456...Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xc5batch branchmap $USUAL_BUNDLE2_CAPS_SERVER$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
319 sending capabilities command
341 sending capabilities command
320 s> POST /api/exp-http-v2-0001/ro/capabilities HTTP/1.1\r\n
342 s> POST /api/exp-http-v2-0001/ro/capabilities HTTP/1.1\r\n
321 s> Accept-Encoding: identity\r\n
343 s> Accept-Encoding: identity\r\n
322 s> *\r\n (glob)
344 s> accept: application/mercurial-exp-framing-0005\r\n
323 s> content-type: application/mercurial-exp-framing-0005\r\n
345 s> content-type: application/mercurial-exp-framing-0005\r\n
324 s> content-length: 27\r\n
346 s> content-length: 27\r\n
325 s> host: $LOCALIP:$HGPORT\r\n (glob)
347 s> host: $LOCALIP:$HGPORT\r\n (glob)
326 s> user-agent: Mercurial debugwireproto\r\n
348 s> user-agent: Mercurial debugwireproto\r\n
327 s> \r\n
349 s> \r\n
328 s> \x13\x00\x00\x01\x00\x01\x01\x11\xa1DnameLcapabilities
350 s> \x13\x00\x00\x01\x00\x01\x01\x11\xa1DnameLcapabilities
329 s> makefile('rb', None)
351 s> makefile('rb', None)
330 s> HTTP/1.1 200 OK\r\n
352 s> HTTP/1.1 200 OK\r\n
331 s> Server: testing stub value\r\n
353 s> Server: testing stub value\r\n
332 s> Date: $HTTP_DATE$\r\n
354 s> Date: $HTTP_DATE$\r\n
333 s> Content-Type: application/mercurial-exp-framing-0005\r\n
355 s> Content-Type: application/mercurial-exp-framing-0005\r\n
334 s> Transfer-Encoding: chunked\r\n
356 s> Transfer-Encoding: chunked\r\n
335 s> \r\n
357 s> \r\n
336 s> 13\r\n
358 s> 13\r\n
337 s> \x0b\x00\x00\x01\x00\x02\x011
359 s> \x0b\x00\x00\x01\x00\x02\x011
338 s> \xa1FstatusBok
360 s> \xa1FstatusBok
339 s> \r\n
361 s> \r\n
340 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
362 received frame(size=11; request=1; stream=2; streamflags=stream-begin; type=command-response; flags=continuation)
341 s> 1cc\r\n
363 s> 243\r\n
342 s> \xc4\x01\x00\x01\x00\x02\x001
364 s> ;\x02\x00\x01\x00\x02\x001
343 s> \xa4Hcommands\xa7Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1
365 s> \xa4Hcommands\xa8Ibranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa3Ffields\xd9\x01\x02\x82GparentsHrevisionInoderange\x82\x81J0123456...\x81Iabcdef...Enodes\x81J0123456...Kpermissions\x81DpullEheads\xa2Dargs\xa1Jpubliconly\xf4Kpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\x81HdeadbeefKpermissions\x81DpullHlistkeys\xa2Dargs\xa1InamespaceBnsKpermissions\x81DpullFlookup\xa2Dargs\xa1CkeyCfooKpermissions\x81DpullGpushkey\xa2Dargs\xa4CkeyCkeyInamespaceBnsCnewCnewColdColdKpermissions\x81DpushKcompression\x81\xa1DnameDzlibQframingmediatypes\x81X&application/mercurial-exp-framing-0005Nrawrepoformats\x82LgeneraldeltaHrevlogv1
344 s> \r\n
366 s> \r\n
345 received frame(size=452; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
367 received frame(size=571; request=1; stream=2; streamflags=; type=command-response; flags=continuation)
346 s> 8\r\n
368 s> 8\r\n
347 s> \x00\x00\x00\x01\x00\x02\x002
369 s> \x00\x00\x00\x01\x00\x02\x002
348 s> \r\n
370 s> \r\n
349 s> 0\r\n
371 s> 0\r\n
350 s> \r\n
372 s> \r\n
351 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
373 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
352 response: gen[
374 response: gen[
353 {
375 {
354 b'commands': {
376 b'commands': {
355 b'branchmap': {
377 b'branchmap': {
356 b'args': {},
378 b'args': {},
357 b'permissions': [
379 b'permissions': [
358 b'pull'
380 b'pull'
359 ]
381 ]
360 },
382 },
361 b'capabilities': {
383 b'capabilities': {
362 b'args': {},
384 b'args': {},
363 b'permissions': [
385 b'permissions': [
364 b'pull'
386 b'pull'
365 ]
387 ]
366 },
388 },
389 b'changesetdata': {
390 b'args': {
391 b'fields': set([
392 b'parents',
393 b'revision'
394 ]),
395 b'noderange': [
396 [
397 b'0123456...'
398 ],
399 [
400 b'abcdef...'
401 ]
402 ],
403 b'nodes': [
404 b'0123456...'
405 ]
406 },
407 b'permissions': [
408 b'pull'
409 ]
410 },
367 b'heads': {
411 b'heads': {
368 b'args': {
412 b'args': {
369 b'publiconly': False
413 b'publiconly': False
370 },
414 },
371 b'permissions': [
415 b'permissions': [
372 b'pull'
416 b'pull'
373 ]
417 ]
374 },
418 },
375 b'known': {
419 b'known': {
376 b'args': {
420 b'args': {
377 b'nodes': [
421 b'nodes': [
378 b'deadbeef'
422 b'deadbeef'
379 ]
423 ]
380 },
424 },
381 b'permissions': [
425 b'permissions': [
382 b'pull'
426 b'pull'
383 ]
427 ]
384 },
428 },
385 b'listkeys': {
429 b'listkeys': {
386 b'args': {
430 b'args': {
387 b'namespace': b'ns'
431 b'namespace': b'ns'
388 },
432 },
389 b'permissions': [
433 b'permissions': [
390 b'pull'
434 b'pull'
391 ]
435 ]
392 },
436 },
393 b'lookup': {
437 b'lookup': {
394 b'args': {
438 b'args': {
395 b'key': b'foo'
439 b'key': b'foo'
396 },
440 },
397 b'permissions': [
441 b'permissions': [
398 b'pull'
442 b'pull'
399 ]
443 ]
400 },
444 },
401 b'pushkey': {
445 b'pushkey': {
402 b'args': {
446 b'args': {
403 b'key': b'key',
447 b'key': b'key',
404 b'namespace': b'ns',
448 b'namespace': b'ns',
405 b'new': b'new',
449 b'new': b'new',
406 b'old': b'old'
450 b'old': b'old'
407 },
451 },
408 b'permissions': [
452 b'permissions': [
409 b'push'
453 b'push'
410 ]
454 ]
411 }
455 }
412 },
456 },
413 b'compression': [
457 b'compression': [
414 {
458 {
415 b'name': b'zlib'
459 b'name': b'zlib'
416 }
460 }
417 ],
461 ],
418 b'framingmediatypes': [
462 b'framingmediatypes': [
419 b'application/mercurial-exp-framing-0005'
463 b'application/mercurial-exp-framing-0005'
420 ],
464 ],
421 b'rawrepoformats': [
465 b'rawrepoformats': [
422 b'generaldelta',
466 b'generaldelta',
423 b'revlogv1'
467 b'revlogv1'
424 ]
468 ]
425 }
469 }
426 ]
470 ]
427
471
428 $ cat error.log
472 $ cat error.log
General Comments 0
You need to be logged in to leave comments. Login now