wireprotov2server: use our JSON encoder...
Gregory Szorc
r41448:e82288a9 default
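For readers skimming the diff below: the change swaps the standard library json module for Mercurial's own templatefilters.json() when the debugreflect endpoint serializes reactor state. The following is an illustrative sketch of the call-shape difference only; the action/meta values are made up and are not part of the commit, and it assumes the mercurial package is importable.

# Illustration only -- not part of the commit.
from mercurial import templatefilters

action = b'wantframe'                      # hypothetical reactor action
meta = {b'requestid': 1, b'payload': b''}  # hypothetical frame metadata

# Old call shape (removed by this change): the stdlib encoder, configured
# for stable output. Note json.dumps() cannot serialize bytes values on
# Python 3.
#   import json
#   states.append(json.dumps((action, meta), sort_keys=True,
#                            separators=(', ', ': ')))

# New call shape used by the diff: Mercurial's own encoder, which handles
# the bytes objects the wire protocol code passes around.
state = templatefilters.json((action, meta))
print(state)
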
@@ -1,1456 +1,1454 @@
1 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
1 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
2 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
2 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 #
3 #
4 # This software may be used and distributed according to the terms of the
4 # This software may be used and distributed according to the terms of the
5 # GNU General Public License version 2 or any later version.
5 # GNU General Public License version 2 or any later version.
6
6
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import collections
9 import collections
10 import contextlib
10 import contextlib
11 import hashlib
11 import hashlib
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullid,
16 nullid,
17 )
17 )
18 from . import (
18 from . import (
19 discovery,
19 discovery,
20 encoding,
20 encoding,
21 error,
21 error,
22 match as matchmod,
22 match as matchmod,
23 narrowspec,
23 narrowspec,
24 pycompat,
24 pycompat,
25 streamclone,
25 streamclone,
26 templatefilters,
26 util,
27 util,
27 wireprotoframing,
28 wireprotoframing,
28 wireprototypes,
29 wireprototypes,
29 )
30 )
30 from .utils import (
31 from .utils import (
31 cborutil,
32 cborutil,
32 interfaceutil,
33 interfaceutil,
33 stringutil,
34 stringutil,
34 )
35 )
35
36
36 FRAMINGTYPE = b'application/mercurial-exp-framing-0006'
37 FRAMINGTYPE = b'application/mercurial-exp-framing-0006'
37
38
38 HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2
39 HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2
39
40
40 COMMANDS = wireprototypes.commanddict()
41 COMMANDS = wireprototypes.commanddict()
41
42
42 # Value inserted into cache key computation function. Change the value to
43 # Value inserted into cache key computation function. Change the value to
43 # force new cache keys for every command request. This should be done when
44 # force new cache keys for every command request. This should be done when
44 # there is a change to how caching works, etc.
45 # there is a change to how caching works, etc.
45 GLOBAL_CACHE_VERSION = 1
46 GLOBAL_CACHE_VERSION = 1
46
47
47 def handlehttpv2request(rctx, req, res, checkperm, urlparts):
48 def handlehttpv2request(rctx, req, res, checkperm, urlparts):
48 from .hgweb import common as hgwebcommon
49 from .hgweb import common as hgwebcommon
49
50
50 # URL space looks like: <permissions>/<command>, where <permission> can
51 # URL space looks like: <permissions>/<command>, where <permission> can
51 # be ``ro`` or ``rw`` to signal read-only or read-write, respectively.
52 # be ``ro`` or ``rw`` to signal read-only or read-write, respectively.
52
53
53 # Root URL does nothing meaningful... yet.
54 # Root URL does nothing meaningful... yet.
54 if not urlparts:
55 if not urlparts:
55 res.status = b'200 OK'
56 res.status = b'200 OK'
56 res.headers[b'Content-Type'] = b'text/plain'
57 res.headers[b'Content-Type'] = b'text/plain'
57 res.setbodybytes(_('HTTP version 2 API handler'))
58 res.setbodybytes(_('HTTP version 2 API handler'))
58 return
59 return
59
60
60 if len(urlparts) == 1:
61 if len(urlparts) == 1:
61 res.status = b'404 Not Found'
62 res.status = b'404 Not Found'
62 res.headers[b'Content-Type'] = b'text/plain'
63 res.headers[b'Content-Type'] = b'text/plain'
63 res.setbodybytes(_('do not know how to process %s\n') %
64 res.setbodybytes(_('do not know how to process %s\n') %
64 req.dispatchpath)
65 req.dispatchpath)
65 return
66 return
66
67
67 permission, command = urlparts[0:2]
68 permission, command = urlparts[0:2]
68
69
69 if permission not in (b'ro', b'rw'):
70 if permission not in (b'ro', b'rw'):
70 res.status = b'404 Not Found'
71 res.status = b'404 Not Found'
71 res.headers[b'Content-Type'] = b'text/plain'
72 res.headers[b'Content-Type'] = b'text/plain'
72 res.setbodybytes(_('unknown permission: %s') % permission)
73 res.setbodybytes(_('unknown permission: %s') % permission)
73 return
74 return
74
75
75 if req.method != 'POST':
76 if req.method != 'POST':
76 res.status = b'405 Method Not Allowed'
77 res.status = b'405 Method Not Allowed'
77 res.headers[b'Allow'] = b'POST'
78 res.headers[b'Allow'] = b'POST'
78 res.setbodybytes(_('commands require POST requests'))
79 res.setbodybytes(_('commands require POST requests'))
79 return
80 return
80
81
81 # At some point we'll want to use our own API instead of recycling the
82 # At some point we'll want to use our own API instead of recycling the
82 # behavior of version 1 of the wire protocol...
83 # behavior of version 1 of the wire protocol...
83 # TODO return reasonable responses - not responses that overload the
84 # TODO return reasonable responses - not responses that overload the
84 # HTTP status line message for error reporting.
85 # HTTP status line message for error reporting.
85 try:
86 try:
86 checkperm(rctx, req, 'pull' if permission == b'ro' else 'push')
87 checkperm(rctx, req, 'pull' if permission == b'ro' else 'push')
87 except hgwebcommon.ErrorResponse as e:
88 except hgwebcommon.ErrorResponse as e:
88 res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
89 res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
89 for k, v in e.headers:
90 for k, v in e.headers:
90 res.headers[k] = v
91 res.headers[k] = v
91 res.setbodybytes('permission denied')
92 res.setbodybytes('permission denied')
92 return
93 return
93
94
94 # We have a special endpoint to reflect the request back at the client.
95 # We have a special endpoint to reflect the request back at the client.
95 if command == b'debugreflect':
96 if command == b'debugreflect':
96 _processhttpv2reflectrequest(rctx.repo.ui, rctx.repo, req, res)
97 _processhttpv2reflectrequest(rctx.repo.ui, rctx.repo, req, res)
97 return
98 return
98
99
99 # Extra commands that we handle that aren't really wire protocol
100 # Extra commands that we handle that aren't really wire protocol
100 # commands. Think extra hard before making this hackery available to
101 # commands. Think extra hard before making this hackery available to
101 # extension.
102 # extension.
102 extracommands = {'multirequest'}
103 extracommands = {'multirequest'}
103
104
104 if command not in COMMANDS and command not in extracommands:
105 if command not in COMMANDS and command not in extracommands:
105 res.status = b'404 Not Found'
106 res.status = b'404 Not Found'
106 res.headers[b'Content-Type'] = b'text/plain'
107 res.headers[b'Content-Type'] = b'text/plain'
107 res.setbodybytes(_('unknown wire protocol command: %s\n') % command)
108 res.setbodybytes(_('unknown wire protocol command: %s\n') % command)
108 return
109 return
109
110
110 repo = rctx.repo
111 repo = rctx.repo
111 ui = repo.ui
112 ui = repo.ui
112
113
113 proto = httpv2protocolhandler(req, ui)
114 proto = httpv2protocolhandler(req, ui)
114
115
115 if (not COMMANDS.commandavailable(command, proto)
116 if (not COMMANDS.commandavailable(command, proto)
116 and command not in extracommands):
117 and command not in extracommands):
117 res.status = b'404 Not Found'
118 res.status = b'404 Not Found'
118 res.headers[b'Content-Type'] = b'text/plain'
119 res.headers[b'Content-Type'] = b'text/plain'
119 res.setbodybytes(_('invalid wire protocol command: %s') % command)
120 res.setbodybytes(_('invalid wire protocol command: %s') % command)
120 return
121 return
121
122
122 # TODO consider cases where proxies may add additional Accept headers.
123 # TODO consider cases where proxies may add additional Accept headers.
123 if req.headers.get(b'Accept') != FRAMINGTYPE:
124 if req.headers.get(b'Accept') != FRAMINGTYPE:
124 res.status = b'406 Not Acceptable'
125 res.status = b'406 Not Acceptable'
125 res.headers[b'Content-Type'] = b'text/plain'
126 res.headers[b'Content-Type'] = b'text/plain'
126 res.setbodybytes(_('client MUST specify Accept header with value: %s\n')
127 res.setbodybytes(_('client MUST specify Accept header with value: %s\n')
127 % FRAMINGTYPE)
128 % FRAMINGTYPE)
128 return
129 return
129
130
130 if req.headers.get(b'Content-Type') != FRAMINGTYPE:
131 if req.headers.get(b'Content-Type') != FRAMINGTYPE:
131 res.status = b'415 Unsupported Media Type'
132 res.status = b'415 Unsupported Media Type'
132 # TODO we should send a response with appropriate media type,
133 # TODO we should send a response with appropriate media type,
133 # since client does Accept it.
134 # since client does Accept it.
134 res.headers[b'Content-Type'] = b'text/plain'
135 res.headers[b'Content-Type'] = b'text/plain'
135 res.setbodybytes(_('client MUST send Content-Type header with '
136 res.setbodybytes(_('client MUST send Content-Type header with '
136 'value: %s\n') % FRAMINGTYPE)
137 'value: %s\n') % FRAMINGTYPE)
137 return
138 return
138
139
139 _processhttpv2request(ui, repo, req, res, permission, command, proto)
140 _processhttpv2request(ui, repo, req, res, permission, command, proto)
140
141
141 def _processhttpv2reflectrequest(ui, repo, req, res):
142 def _processhttpv2reflectrequest(ui, repo, req, res):
142 """Reads unified frame protocol request and dumps out state to client.
143 """Reads unified frame protocol request and dumps out state to client.
143
144
144 This special endpoint can be used to help debug the wire protocol.
145 This special endpoint can be used to help debug the wire protocol.
145
146
146 Instead of routing the request through the normal dispatch mechanism,
147 Instead of routing the request through the normal dispatch mechanism,
147 we instead read all frames, decode them, and feed them into our state
148 we instead read all frames, decode them, and feed them into our state
148 tracker. We then dump the log of all that activity back out to the
149 tracker. We then dump the log of all that activity back out to the
149 client.
150 client.
150 """
151 """
151 import json
152
153 # Reflection APIs have a history of being abused, accidentally disclosing
152 # Reflection APIs have a history of being abused, accidentally disclosing
154 # sensitive data, etc. So we have a config knob.
153 # sensitive data, etc. So we have a config knob.
155 if not ui.configbool('experimental', 'web.api.debugreflect'):
154 if not ui.configbool('experimental', 'web.api.debugreflect'):
156 res.status = b'404 Not Found'
155 res.status = b'404 Not Found'
157 res.headers[b'Content-Type'] = b'text/plain'
156 res.headers[b'Content-Type'] = b'text/plain'
158 res.setbodybytes(_('debugreflect service not available'))
157 res.setbodybytes(_('debugreflect service not available'))
159 return
158 return
160
159
161 # We assume we have a unified framing protocol request body.
160 # We assume we have a unified framing protocol request body.
162
161
163 reactor = wireprotoframing.serverreactor(ui)
162 reactor = wireprotoframing.serverreactor(ui)
164 states = []
163 states = []
165
164
166 while True:
165 while True:
167 frame = wireprotoframing.readframe(req.bodyfh)
166 frame = wireprotoframing.readframe(req.bodyfh)
168
167
169 if not frame:
168 if not frame:
170 states.append(b'received: <no frame>')
169 states.append(b'received: <no frame>')
171 break
170 break
172
171
173 states.append(b'received: %d %d %d %s' % (frame.typeid, frame.flags,
172 states.append(b'received: %d %d %d %s' % (frame.typeid, frame.flags,
174 frame.requestid,
173 frame.requestid,
175 frame.payload))
174 frame.payload))
176
175
177 action, meta = reactor.onframerecv(frame)
176 action, meta = reactor.onframerecv(frame)
178 states.append(json.dumps((action, meta), sort_keys=True,
177 states.append(templatefilters.json((action, meta)))
179 separators=(', ', ': ')))
180
178
181 action, meta = reactor.oninputeof()
179 action, meta = reactor.oninputeof()
182 meta['action'] = action
180 meta['action'] = action
183 states.append(json.dumps(meta, sort_keys=True, separators=(', ',': ')))
181 states.append(templatefilters.json(meta))
184
182
185 res.status = b'200 OK'
183 res.status = b'200 OK'
186 res.headers[b'Content-Type'] = b'text/plain'
184 res.headers[b'Content-Type'] = b'text/plain'
187 res.setbodybytes(b'\n'.join(states))
185 res.setbodybytes(b'\n'.join(states))
188
186
189 def _processhttpv2request(ui, repo, req, res, authedperm, reqcommand, proto):
187 def _processhttpv2request(ui, repo, req, res, authedperm, reqcommand, proto):
190 """Post-validation handler for HTTPv2 requests.
188 """Post-validation handler for HTTPv2 requests.
191
189
192 Called when the HTTP request contains unified frame-based protocol
190 Called when the HTTP request contains unified frame-based protocol
193 frames for evaluation.
191 frames for evaluation.
194 """
192 """
195 # TODO Some HTTP clients are full duplex and can receive data before
193 # TODO Some HTTP clients are full duplex and can receive data before
196 # the entire request is transmitted. Figure out a way to indicate support
194 # the entire request is transmitted. Figure out a way to indicate support
197 # for that so we can opt into full duplex mode.
195 # for that so we can opt into full duplex mode.
198 reactor = wireprotoframing.serverreactor(ui, deferoutput=True)
196 reactor = wireprotoframing.serverreactor(ui, deferoutput=True)
199 seencommand = False
197 seencommand = False
200
198
201 outstream = None
199 outstream = None
202
200
203 while True:
201 while True:
204 frame = wireprotoframing.readframe(req.bodyfh)
202 frame = wireprotoframing.readframe(req.bodyfh)
205 if not frame:
203 if not frame:
206 break
204 break
207
205
208 action, meta = reactor.onframerecv(frame)
206 action, meta = reactor.onframerecv(frame)
209
207
210 if action == 'wantframe':
208 if action == 'wantframe':
211 # Need more data before we can do anything.
209 # Need more data before we can do anything.
212 continue
210 continue
213 elif action == 'runcommand':
211 elif action == 'runcommand':
214 # Defer creating output stream because we need to wait for
212 # Defer creating output stream because we need to wait for
215 # protocol settings frames so proper encoding can be applied.
213 # protocol settings frames so proper encoding can be applied.
216 if not outstream:
214 if not outstream:
217 outstream = reactor.makeoutputstream()
215 outstream = reactor.makeoutputstream()
218
216
219 sentoutput = _httpv2runcommand(ui, repo, req, res, authedperm,
217 sentoutput = _httpv2runcommand(ui, repo, req, res, authedperm,
220 reqcommand, reactor, outstream,
218 reqcommand, reactor, outstream,
221 meta, issubsequent=seencommand)
219 meta, issubsequent=seencommand)
222
220
223 if sentoutput:
221 if sentoutput:
224 return
222 return
225
223
226 seencommand = True
224 seencommand = True
227
225
228 elif action == 'error':
226 elif action == 'error':
229 # TODO define proper error mechanism.
227 # TODO define proper error mechanism.
230 res.status = b'200 OK'
228 res.status = b'200 OK'
231 res.headers[b'Content-Type'] = b'text/plain'
229 res.headers[b'Content-Type'] = b'text/plain'
232 res.setbodybytes(meta['message'] + b'\n')
230 res.setbodybytes(meta['message'] + b'\n')
233 return
231 return
234 else:
232 else:
235 raise error.ProgrammingError(
233 raise error.ProgrammingError(
236 'unhandled action from frame processor: %s' % action)
234 'unhandled action from frame processor: %s' % action)
237
235
238 action, meta = reactor.oninputeof()
236 action, meta = reactor.oninputeof()
239 if action == 'sendframes':
237 if action == 'sendframes':
240 # We assume we haven't started sending the response yet. If we're
238 # We assume we haven't started sending the response yet. If we're
241 # wrong, the response type will raise an exception.
239 # wrong, the response type will raise an exception.
242 res.status = b'200 OK'
240 res.status = b'200 OK'
243 res.headers[b'Content-Type'] = FRAMINGTYPE
241 res.headers[b'Content-Type'] = FRAMINGTYPE
244 res.setbodygen(meta['framegen'])
242 res.setbodygen(meta['framegen'])
245 elif action == 'noop':
243 elif action == 'noop':
246 pass
244 pass
247 else:
245 else:
248 raise error.ProgrammingError('unhandled action from frame processor: %s'
246 raise error.ProgrammingError('unhandled action from frame processor: %s'
249 % action)
247 % action)
250
248
251 def _httpv2runcommand(ui, repo, req, res, authedperm, reqcommand, reactor,
249 def _httpv2runcommand(ui, repo, req, res, authedperm, reqcommand, reactor,
252 outstream, command, issubsequent):
250 outstream, command, issubsequent):
253 """Dispatch a wire protocol command made from HTTPv2 requests.
251 """Dispatch a wire protocol command made from HTTPv2 requests.
254
252
255 The authenticated permission (``authedperm``) along with the original
253 The authenticated permission (``authedperm``) along with the original
256 command from the URL (``reqcommand``) are passed in.
254 command from the URL (``reqcommand``) are passed in.
257 """
255 """
258 # We already validated that the session has permissions to perform the
256 # We already validated that the session has permissions to perform the
259 # actions in ``authedperm``. In the unified frame protocol, the canonical
257 # actions in ``authedperm``. In the unified frame protocol, the canonical
260 # command to run is expressed in a frame. However, the URL also requested
258 # command to run is expressed in a frame. However, the URL also requested
261 # to run a specific command. We need to be careful that the command we
259 # to run a specific command. We need to be careful that the command we
262 # run doesn't have permissions requirements greater than what was granted
260 # run doesn't have permissions requirements greater than what was granted
263 # by ``authedperm``.
261 # by ``authedperm``.
264 #
262 #
265 # Our rule for this is we only allow one command per HTTP request and
263 # Our rule for this is we only allow one command per HTTP request and
266 # that command must match the command in the URL. However, we make
264 # that command must match the command in the URL. However, we make
267 # an exception for the ``multirequest`` URL. This URL is allowed to
265 # an exception for the ``multirequest`` URL. This URL is allowed to
268 # execute multiple commands. We double check permissions of each command
266 # execute multiple commands. We double check permissions of each command
269 # as it is invoked to ensure there is no privilege escalation.
267 # as it is invoked to ensure there is no privilege escalation.
270 # TODO consider allowing multiple commands to regular command URLs
268 # TODO consider allowing multiple commands to regular command URLs
271 # iff each command is the same.
269 # iff each command is the same.
272
270
273 proto = httpv2protocolhandler(req, ui, args=command['args'])
271 proto = httpv2protocolhandler(req, ui, args=command['args'])
274
272
275 if reqcommand == b'multirequest':
273 if reqcommand == b'multirequest':
276 if not COMMANDS.commandavailable(command['command'], proto):
274 if not COMMANDS.commandavailable(command['command'], proto):
277 # TODO proper error mechanism
275 # TODO proper error mechanism
278 res.status = b'200 OK'
276 res.status = b'200 OK'
279 res.headers[b'Content-Type'] = b'text/plain'
277 res.headers[b'Content-Type'] = b'text/plain'
280 res.setbodybytes(_('wire protocol command not available: %s') %
278 res.setbodybytes(_('wire protocol command not available: %s') %
281 command['command'])
279 command['command'])
282 return True
280 return True
283
281
284 # TODO don't use assert here, since it may be elided by -O.
282 # TODO don't use assert here, since it may be elided by -O.
285 assert authedperm in (b'ro', b'rw')
283 assert authedperm in (b'ro', b'rw')
286 wirecommand = COMMANDS[command['command']]
284 wirecommand = COMMANDS[command['command']]
287 assert wirecommand.permission in ('push', 'pull')
285 assert wirecommand.permission in ('push', 'pull')
288
286
289 if authedperm == b'ro' and wirecommand.permission != 'pull':
287 if authedperm == b'ro' and wirecommand.permission != 'pull':
290 # TODO proper error mechanism
288 # TODO proper error mechanism
291 res.status = b'403 Forbidden'
289 res.status = b'403 Forbidden'
292 res.headers[b'Content-Type'] = b'text/plain'
290 res.headers[b'Content-Type'] = b'text/plain'
293 res.setbodybytes(_('insufficient permissions to execute '
291 res.setbodybytes(_('insufficient permissions to execute '
294 'command: %s') % command['command'])
292 'command: %s') % command['command'])
295 return True
293 return True
296
294
297 # TODO should we also call checkperm() here? Maybe not if we're going
295 # TODO should we also call checkperm() here? Maybe not if we're going
298 # to overhaul that API. The granted scope from the URL check should
296 # to overhaul that API. The granted scope from the URL check should
299 # be good enough.
297 # be good enough.
300
298
301 else:
299 else:
302 # Don't allow multiple commands outside of ``multirequest`` URL.
300 # Don't allow multiple commands outside of ``multirequest`` URL.
303 if issubsequent:
301 if issubsequent:
304 # TODO proper error mechanism
302 # TODO proper error mechanism
305 res.status = b'200 OK'
303 res.status = b'200 OK'
306 res.headers[b'Content-Type'] = b'text/plain'
304 res.headers[b'Content-Type'] = b'text/plain'
307 res.setbodybytes(_('multiple commands cannot be issued to this '
305 res.setbodybytes(_('multiple commands cannot be issued to this '
308 'URL'))
306 'URL'))
309 return True
307 return True
310
308
311 if reqcommand != command['command']:
309 if reqcommand != command['command']:
312 # TODO define proper error mechanism
310 # TODO define proper error mechanism
313 res.status = b'200 OK'
311 res.status = b'200 OK'
314 res.headers[b'Content-Type'] = b'text/plain'
312 res.headers[b'Content-Type'] = b'text/plain'
315 res.setbodybytes(_('command in frame must match command in URL'))
313 res.setbodybytes(_('command in frame must match command in URL'))
316 return True
314 return True
317
315
318 res.status = b'200 OK'
316 res.status = b'200 OK'
319 res.headers[b'Content-Type'] = FRAMINGTYPE
317 res.headers[b'Content-Type'] = FRAMINGTYPE
320
318
321 try:
319 try:
322 objs = dispatch(repo, proto, command['command'], command['redirect'])
320 objs = dispatch(repo, proto, command['command'], command['redirect'])
323
321
324 action, meta = reactor.oncommandresponsereadyobjects(
322 action, meta = reactor.oncommandresponsereadyobjects(
325 outstream, command['requestid'], objs)
323 outstream, command['requestid'], objs)
326
324
327 except error.WireprotoCommandError as e:
325 except error.WireprotoCommandError as e:
328 action, meta = reactor.oncommanderror(
326 action, meta = reactor.oncommanderror(
329 outstream, command['requestid'], e.message, e.messageargs)
327 outstream, command['requestid'], e.message, e.messageargs)
330
328
331 except Exception as e:
329 except Exception as e:
332 action, meta = reactor.onservererror(
330 action, meta = reactor.onservererror(
333 outstream, command['requestid'],
331 outstream, command['requestid'],
334 _('exception when invoking command: %s') %
332 _('exception when invoking command: %s') %
335 stringutil.forcebytestr(e))
333 stringutil.forcebytestr(e))
336
334
337 if action == 'sendframes':
335 if action == 'sendframes':
338 res.setbodygen(meta['framegen'])
336 res.setbodygen(meta['framegen'])
339 return True
337 return True
340 elif action == 'noop':
338 elif action == 'noop':
341 return False
339 return False
342 else:
340 else:
343 raise error.ProgrammingError('unhandled event from reactor: %s' %
341 raise error.ProgrammingError('unhandled event from reactor: %s' %
344 action)
342 action)
345
343
346 def getdispatchrepo(repo, proto, command):
344 def getdispatchrepo(repo, proto, command):
347 return repo.filtered('served')
345 return repo.filtered('served')
348
346
349 def dispatch(repo, proto, command, redirect):
347 def dispatch(repo, proto, command, redirect):
350 """Run a wire protocol command.
348 """Run a wire protocol command.
351
349
352 Returns an iterable of objects that will be sent to the client.
350 Returns an iterable of objects that will be sent to the client.
353 """
351 """
354 repo = getdispatchrepo(repo, proto, command)
352 repo = getdispatchrepo(repo, proto, command)
355
353
356 entry = COMMANDS[command]
354 entry = COMMANDS[command]
357 func = entry.func
355 func = entry.func
358 spec = entry.args
356 spec = entry.args
359
357
360 args = proto.getargs(spec)
358 args = proto.getargs(spec)
361
359
362 # There is some duplicate boilerplate code here for calling the command and
360 # There is some duplicate boilerplate code here for calling the command and
363 # emitting objects. It is either that or a lot of indented code that looks
361 # emitting objects. It is either that or a lot of indented code that looks
364 # like a pyramid (since there are a lot of code paths that result in not
362 # like a pyramid (since there are a lot of code paths that result in not
365 # using the cacher).
363 # using the cacher).
366 callcommand = lambda: func(repo, proto, **pycompat.strkwargs(args))
364 callcommand = lambda: func(repo, proto, **pycompat.strkwargs(args))
367
365
368 # Request is not cacheable. Don't bother instantiating a cacher.
366 # Request is not cacheable. Don't bother instantiating a cacher.
369 if not entry.cachekeyfn:
367 if not entry.cachekeyfn:
370 for o in callcommand():
368 for o in callcommand():
371 yield o
369 yield o
372 return
370 return
373
371
374 if redirect:
372 if redirect:
375 redirecttargets = redirect[b'targets']
373 redirecttargets = redirect[b'targets']
376 redirecthashes = redirect[b'hashes']
374 redirecthashes = redirect[b'hashes']
377 else:
375 else:
378 redirecttargets = []
376 redirecttargets = []
379 redirecthashes = []
377 redirecthashes = []
380
378
381 cacher = makeresponsecacher(repo, proto, command, args,
379 cacher = makeresponsecacher(repo, proto, command, args,
382 cborutil.streamencode,
380 cborutil.streamencode,
383 redirecttargets=redirecttargets,
381 redirecttargets=redirecttargets,
384 redirecthashes=redirecthashes)
382 redirecthashes=redirecthashes)
385
383
386 # But we have no cacher. Do default handling.
384 # But we have no cacher. Do default handling.
387 if not cacher:
385 if not cacher:
388 for o in callcommand():
386 for o in callcommand():
389 yield o
387 yield o
390 return
388 return
391
389
392 with cacher:
390 with cacher:
393 cachekey = entry.cachekeyfn(repo, proto, cacher,
391 cachekey = entry.cachekeyfn(repo, proto, cacher,
394 **pycompat.strkwargs(args))
392 **pycompat.strkwargs(args))
395
393
396 # No cache key or the cacher doesn't like it. Do default handling.
394 # No cache key or the cacher doesn't like it. Do default handling.
397 if cachekey is None or not cacher.setcachekey(cachekey):
395 if cachekey is None or not cacher.setcachekey(cachekey):
398 for o in callcommand():
396 for o in callcommand():
399 yield o
397 yield o
400 return
398 return
401
399
402 # Serve it from the cache, if possible.
400 # Serve it from the cache, if possible.
403 cached = cacher.lookup()
401 cached = cacher.lookup()
404
402
405 if cached:
403 if cached:
406 for o in cached['objs']:
404 for o in cached['objs']:
407 yield o
405 yield o
408 return
406 return
409
407
410 # Else call the command and feed its output into the cacher, allowing
408 # Else call the command and feed its output into the cacher, allowing
411 # the cacher to buffer/mutate objects as it desires.
409 # the cacher to buffer/mutate objects as it desires.
412 for o in callcommand():
410 for o in callcommand():
413 for o in cacher.onobject(o):
411 for o in cacher.onobject(o):
414 yield o
412 yield o
415
413
416 for o in cacher.onfinished():
414 for o in cacher.onfinished():
417 yield o
415 yield o
418
416
419 @interfaceutil.implementer(wireprototypes.baseprotocolhandler)
417 @interfaceutil.implementer(wireprototypes.baseprotocolhandler)
420 class httpv2protocolhandler(object):
418 class httpv2protocolhandler(object):
421 def __init__(self, req, ui, args=None):
419 def __init__(self, req, ui, args=None):
422 self._req = req
420 self._req = req
423 self._ui = ui
421 self._ui = ui
424 self._args = args
422 self._args = args
425
423
426 @property
424 @property
427 def name(self):
425 def name(self):
428 return HTTP_WIREPROTO_V2
426 return HTTP_WIREPROTO_V2
429
427
430 def getargs(self, args):
428 def getargs(self, args):
431 # First look for args that were passed but aren't registered on this
429 # First look for args that were passed but aren't registered on this
432 # command.
430 # command.
433 extra = set(self._args) - set(args)
431 extra = set(self._args) - set(args)
434 if extra:
432 if extra:
435 raise error.WireprotoCommandError(
433 raise error.WireprotoCommandError(
436 'unsupported argument to command: %s' %
434 'unsupported argument to command: %s' %
437 ', '.join(sorted(extra)))
435 ', '.join(sorted(extra)))
438
436
439 # And look for required arguments that are missing.
437 # And look for required arguments that are missing.
440 missing = {a for a in args if args[a]['required']} - set(self._args)
438 missing = {a for a in args if args[a]['required']} - set(self._args)
441
439
442 if missing:
440 if missing:
443 raise error.WireprotoCommandError(
441 raise error.WireprotoCommandError(
444 'missing required arguments: %s' % ', '.join(sorted(missing)))
442 'missing required arguments: %s' % ', '.join(sorted(missing)))
445
443
446 # Now derive the arguments to pass to the command, taking into
444 # Now derive the arguments to pass to the command, taking into
447 # account the arguments specified by the client.
445 # account the arguments specified by the client.
448 data = {}
446 data = {}
449 for k, meta in sorted(args.items()):
447 for k, meta in sorted(args.items()):
450 # This argument wasn't passed by the client.
448 # This argument wasn't passed by the client.
451 if k not in self._args:
449 if k not in self._args:
452 data[k] = meta['default']()
450 data[k] = meta['default']()
453 continue
451 continue
454
452
455 v = self._args[k]
453 v = self._args[k]
456
454
457 # Sets may be expressed as lists. Silently normalize.
455 # Sets may be expressed as lists. Silently normalize.
458 if meta['type'] == 'set' and isinstance(v, list):
456 if meta['type'] == 'set' and isinstance(v, list):
459 v = set(v)
457 v = set(v)
460
458
461 # TODO consider more/stronger type validation.
459 # TODO consider more/stronger type validation.
462
460
463 data[k] = v
461 data[k] = v
464
462
465 return data
463 return data
466
464
467 def getprotocaps(self):
465 def getprotocaps(self):
468 # Protocol capabilities are currently not implemented for HTTP V2.
466 # Protocol capabilities are currently not implemented for HTTP V2.
469 return set()
467 return set()
470
468
471 def getpayload(self):
469 def getpayload(self):
472 raise NotImplementedError
470 raise NotImplementedError
473
471
474 @contextlib.contextmanager
472 @contextlib.contextmanager
475 def mayberedirectstdio(self):
473 def mayberedirectstdio(self):
476 raise NotImplementedError
474 raise NotImplementedError
477
475
478 def client(self):
476 def client(self):
479 raise NotImplementedError
477 raise NotImplementedError
480
478
481 def addcapabilities(self, repo, caps):
479 def addcapabilities(self, repo, caps):
482 return caps
480 return caps
483
481
484 def checkperm(self, perm):
482 def checkperm(self, perm):
485 raise NotImplementedError
483 raise NotImplementedError
486
484
487 def httpv2apidescriptor(req, repo):
485 def httpv2apidescriptor(req, repo):
488 proto = httpv2protocolhandler(req, repo.ui)
486 proto = httpv2protocolhandler(req, repo.ui)
489
487
490 return _capabilitiesv2(repo, proto)
488 return _capabilitiesv2(repo, proto)
491
489
492 def _capabilitiesv2(repo, proto):
490 def _capabilitiesv2(repo, proto):
493 """Obtain the set of capabilities for version 2 transports.
491 """Obtain the set of capabilities for version 2 transports.
494
492
495 These capabilities are distinct from the capabilities for version 1
493 These capabilities are distinct from the capabilities for version 1
496 transports.
494 transports.
497 """
495 """
498 caps = {
496 caps = {
499 'commands': {},
497 'commands': {},
500 'framingmediatypes': [FRAMINGTYPE],
498 'framingmediatypes': [FRAMINGTYPE],
501 'pathfilterprefixes': set(narrowspec.VALID_PREFIXES),
499 'pathfilterprefixes': set(narrowspec.VALID_PREFIXES),
502 }
500 }
503
501
504 for command, entry in COMMANDS.items():
502 for command, entry in COMMANDS.items():
505 args = {}
503 args = {}
506
504
507 for arg, meta in entry.args.items():
505 for arg, meta in entry.args.items():
508 args[arg] = {
506 args[arg] = {
509 # TODO should this be a normalized type using CBOR's
507 # TODO should this be a normalized type using CBOR's
510 # terminology?
508 # terminology?
511 b'type': meta['type'],
509 b'type': meta['type'],
512 b'required': meta['required'],
510 b'required': meta['required'],
513 }
511 }
514
512
515 if not meta['required']:
513 if not meta['required']:
516 args[arg][b'default'] = meta['default']()
514 args[arg][b'default'] = meta['default']()
517
515
518 if meta['validvalues']:
516 if meta['validvalues']:
519 args[arg][b'validvalues'] = meta['validvalues']
517 args[arg][b'validvalues'] = meta['validvalues']
520
518
521 # TODO this type of check should be defined in a per-command callback.
519 # TODO this type of check should be defined in a per-command callback.
522 if (command == b'rawstorefiledata'
520 if (command == b'rawstorefiledata'
523 and not streamclone.allowservergeneration(repo)):
521 and not streamclone.allowservergeneration(repo)):
524 continue
522 continue
525
523
526 caps['commands'][command] = {
524 caps['commands'][command] = {
527 'args': args,
525 'args': args,
528 'permissions': [entry.permission],
526 'permissions': [entry.permission],
529 }
527 }
530
528
531 if entry.extracapabilitiesfn:
529 if entry.extracapabilitiesfn:
532 extracaps = entry.extracapabilitiesfn(repo, proto)
530 extracaps = entry.extracapabilitiesfn(repo, proto)
533 caps['commands'][command].update(extracaps)
531 caps['commands'][command].update(extracaps)
534
532
535 caps['rawrepoformats'] = sorted(repo.requirements &
533 caps['rawrepoformats'] = sorted(repo.requirements &
536 repo.supportedformats)
534 repo.supportedformats)
537
535
538 targets = getadvertisedredirecttargets(repo, proto)
536 targets = getadvertisedredirecttargets(repo, proto)
539 if targets:
537 if targets:
540 caps[b'redirect'] = {
538 caps[b'redirect'] = {
541 b'targets': [],
539 b'targets': [],
542 b'hashes': [b'sha256', b'sha1'],
540 b'hashes': [b'sha256', b'sha1'],
543 }
541 }
544
542
545 for target in targets:
543 for target in targets:
546 entry = {
544 entry = {
547 b'name': target['name'],
545 b'name': target['name'],
548 b'protocol': target['protocol'],
546 b'protocol': target['protocol'],
549 b'uris': target['uris'],
547 b'uris': target['uris'],
550 }
548 }
551
549
552 for key in ('snirequired', 'tlsversions'):
550 for key in ('snirequired', 'tlsversions'):
553 if key in target:
551 if key in target:
554 entry[key] = target[key]
552 entry[key] = target[key]
555
553
556 caps[b'redirect'][b'targets'].append(entry)
554 caps[b'redirect'][b'targets'].append(entry)
557
555
558 return proto.addcapabilities(repo, caps)
556 return proto.addcapabilities(repo, caps)
559
557
560 def getadvertisedredirecttargets(repo, proto):
558 def getadvertisedredirecttargets(repo, proto):
561 """Obtain a list of content redirect targets.
559 """Obtain a list of content redirect targets.
562
560
563 Returns a list containing potential redirect targets that will be
561 Returns a list containing potential redirect targets that will be
564 advertised in capabilities data. Each dict MUST have the following
562 advertised in capabilities data. Each dict MUST have the following
565 keys:
563 keys:
566
564
567 name
565 name
568 The name of this redirect target. This is the identifier clients use
566 The name of this redirect target. This is the identifier clients use
569 to refer to a target. It is transferred as part of every command
567 to refer to a target. It is transferred as part of every command
570 request.
568 request.
571
569
572 protocol
570 protocol
573 Network protocol used by this target. Typically this is the string
571 Network protocol used by this target. Typically this is the string
574 in front of the ``://`` in a URL. e.g. ``https``.
572 in front of the ``://`` in a URL. e.g. ``https``.
575
573
576 uris
574 uris
577 List of representative URIs for this target. Clients can use the
575 List of representative URIs for this target. Clients can use the
578 URIs to test parsing for compatibility or for ordering preference
576 URIs to test parsing for compatibility or for ordering preference
579 for which target to use.
577 for which target to use.
580
578
581 The following optional keys are recognized:
579 The following optional keys are recognized:
582
580
583 snirequired
581 snirequired
584 Bool indicating if Server Name Indication (SNI) is required to
582 Bool indicating if Server Name Indication (SNI) is required to
585 connect to this target.
583 connect to this target.
586
584
587 tlsversions
585 tlsversions
588 List of bytes indicating which TLS versions are supported by this
586 List of bytes indicating which TLS versions are supported by this
589 target.
587 target.
590
588
591 By default, clients reflect the target order advertised by servers
589 By default, clients reflect the target order advertised by servers
592 and servers will use the first client-advertised target when picking
590 and servers will use the first client-advertised target when picking
593 a redirect target. So targets should be advertised in the order the
591 a redirect target. So targets should be advertised in the order the
594 server prefers they be used.
592 server prefers they be used.
595 """
593 """
596 return []
594 return []
597
595
598 def wireprotocommand(name, args=None, permission='push', cachekeyfn=None,
596 def wireprotocommand(name, args=None, permission='push', cachekeyfn=None,
599 extracapabilitiesfn=None):
597 extracapabilitiesfn=None):
600 """Decorator to declare a wire protocol command.
598 """Decorator to declare a wire protocol command.
601
599
602 ``name`` is the name of the wire protocol command being provided.
600 ``name`` is the name of the wire protocol command being provided.
603
601
604 ``args`` is a dict defining arguments accepted by the command. Keys are
602 ``args`` is a dict defining arguments accepted by the command. Keys are
605 the argument name. Values are dicts with the following keys:
603 the argument name. Values are dicts with the following keys:
606
604
607 ``type``
605 ``type``
608 The argument data type. Must be one of the following string
606 The argument data type. Must be one of the following string
609 literals: ``bytes``, ``int``, ``list``, ``dict``, ``set``,
607 literals: ``bytes``, ``int``, ``list``, ``dict``, ``set``,
610 or ``bool``.
608 or ``bool``.
611
609
612 ``default``
610 ``default``
613 A callable returning the default value for this argument. If not
611 A callable returning the default value for this argument. If not
614 specified, ``None`` will be the default value.
612 specified, ``None`` will be the default value.
615
613
616 ``example``
614 ``example``
617 An example value for this argument.
615 An example value for this argument.
618
616
619 ``validvalues``
617 ``validvalues``
620 Set of recognized values for this argument.
618 Set of recognized values for this argument.
621
619
622 ``permission`` defines the permission type needed to run this command.
620 ``permission`` defines the permission type needed to run this command.
623 Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
621 Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
624 respectively. Default is to assume command requires ``push`` permissions
622 respectively. Default is to assume command requires ``push`` permissions
625 because otherwise commands not declaring their permissions could modify
623 because otherwise commands not declaring their permissions could modify
626 a repository that is supposed to be read-only.
624 a repository that is supposed to be read-only.
627
625
628 ``cachekeyfn`` defines an optional callable that can derive the
626 ``cachekeyfn`` defines an optional callable that can derive the
629 cache key for this request.
627 cache key for this request.
630
628
631 ``extracapabilitiesfn`` defines an optional callable that defines extra
629 ``extracapabilitiesfn`` defines an optional callable that defines extra
632 command capabilities/parameters that are advertised next to the command
630 command capabilities/parameters that are advertised next to the command
633 in the capabilities data structure describing the server. The callable
631 in the capabilities data structure describing the server. The callable
634 receives as arguments the repository and protocol objects. It returns
632 receives as arguments the repository and protocol objects. It returns
635 a dict of extra fields to add to the command descriptor.
633 a dict of extra fields to add to the command descriptor.
636
634
637 Wire protocol commands are generators of objects to be serialized and
635 Wire protocol commands are generators of objects to be serialized and
638 sent to the client.
636 sent to the client.
639
637
640 If a command raises an uncaught exception, this will be translated into
638 If a command raises an uncaught exception, this will be translated into
641 a command error.
639 a command error.
642
640
643 All commands can opt in to being cacheable by defining a function
641 All commands can opt in to being cacheable by defining a function
644 (``cachekeyfn``) that is called to derive a cache key. This function
642 (``cachekeyfn``) that is called to derive a cache key. This function
645 receives the same arguments as the command itself plus a ``cacher``
643 receives the same arguments as the command itself plus a ``cacher``
646 argument containing the active cacher for the request and returns a bytes
644 argument containing the active cacher for the request and returns a bytes
647 containing the key in a cache the response to this command may be cached
645 containing the key in a cache the response to this command may be cached
648 under.
646 under.
649 """
647 """
650 transports = {k for k, v in wireprototypes.TRANSPORTS.items()
648 transports = {k for k, v in wireprototypes.TRANSPORTS.items()
651 if v['version'] == 2}
649 if v['version'] == 2}
652
650
653 if permission not in ('push', 'pull'):
651 if permission not in ('push', 'pull'):
654 raise error.ProgrammingError('invalid wire protocol permission; '
652 raise error.ProgrammingError('invalid wire protocol permission; '
655 'got %s; expected "push" or "pull"' %
653 'got %s; expected "push" or "pull"' %
656 permission)
654 permission)
657
655
658 if args is None:
656 if args is None:
659 args = {}
657 args = {}
660
658
661 if not isinstance(args, dict):
659 if not isinstance(args, dict):
662 raise error.ProgrammingError('arguments for version 2 commands '
660 raise error.ProgrammingError('arguments for version 2 commands '
663 'must be declared as dicts')
661 'must be declared as dicts')
664
662
665 for arg, meta in args.items():
663 for arg, meta in args.items():
666 if arg == '*':
664 if arg == '*':
667 raise error.ProgrammingError('* argument name not allowed on '
665 raise error.ProgrammingError('* argument name not allowed on '
668 'version 2 commands')
666 'version 2 commands')
669
667
670 if not isinstance(meta, dict):
668 if not isinstance(meta, dict):
671 raise error.ProgrammingError('arguments for version 2 commands '
669 raise error.ProgrammingError('arguments for version 2 commands '
672 'must declare metadata as a dict')
670 'must declare metadata as a dict')
673
671
674 if 'type' not in meta:
672 if 'type' not in meta:
675 raise error.ProgrammingError('%s argument for command %s does not '
673 raise error.ProgrammingError('%s argument for command %s does not '
676 'declare type field' % (arg, name))
674 'declare type field' % (arg, name))
677
675
678 if meta['type'] not in ('bytes', 'int', 'list', 'dict', 'set', 'bool'):
676 if meta['type'] not in ('bytes', 'int', 'list', 'dict', 'set', 'bool'):
679 raise error.ProgrammingError('%s argument for command %s has '
677 raise error.ProgrammingError('%s argument for command %s has '
680 'illegal type: %s' % (arg, name,
678 'illegal type: %s' % (arg, name,
681 meta['type']))
679 meta['type']))
682
680
683 if 'example' not in meta:
681 if 'example' not in meta:
684 raise error.ProgrammingError('%s argument for command %s does not '
682 raise error.ProgrammingError('%s argument for command %s does not '
685 'declare example field' % (arg, name))
683 'declare example field' % (arg, name))
686
684
687 meta['required'] = 'default' not in meta
685 meta['required'] = 'default' not in meta
688
686
689 meta.setdefault('default', lambda: None)
687 meta.setdefault('default', lambda: None)
690 meta.setdefault('validvalues', None)
688 meta.setdefault('validvalues', None)
691
689
692 def register(func):
690 def register(func):
693 if name in COMMANDS:
691 if name in COMMANDS:
694 raise error.ProgrammingError('%s command already registered '
692 raise error.ProgrammingError('%s command already registered '
695 'for version 2' % name)
693 'for version 2' % name)
696
694
697 COMMANDS[name] = wireprototypes.commandentry(
695 COMMANDS[name] = wireprototypes.commandentry(
698 func, args=args, transports=transports, permission=permission,
696 func, args=args, transports=transports, permission=permission,
699 cachekeyfn=cachekeyfn, extracapabilitiesfn=extracapabilitiesfn)
697 cachekeyfn=cachekeyfn, extracapabilitiesfn=extracapabilitiesfn)
700
698
701 return func
699 return func
702
700
703 return register
701 return register
704
702
705 def makecommandcachekeyfn(command, localversion=None, allargs=False):
703 def makecommandcachekeyfn(command, localversion=None, allargs=False):
706 """Construct a cache key derivation function with common features.
704 """Construct a cache key derivation function with common features.
707
705
708 By default, the cache key is a hash of:
706 By default, the cache key is a hash of:
709
707
710 * The command name.
708 * The command name.
711 * A global cache version number.
709 * A global cache version number.
712 * A local cache version number (passed via ``localversion``).
710 * A local cache version number (passed via ``localversion``).
713 * All the arguments passed to the command.
711 * All the arguments passed to the command.
714 * The media type used.
712 * The media type used.
715 * Wire protocol version string.
713 * Wire protocol version string.
716 * The repository path.
714 * The repository path.
717 """
715 """
718 if not allargs:
716 if not allargs:
719 raise error.ProgrammingError('only allargs=True is currently supported')
717 raise error.ProgrammingError('only allargs=True is currently supported')
720
718
721 if localversion is None:
719 if localversion is None:
722 raise error.ProgrammingError('must set localversion argument value')
720 raise error.ProgrammingError('must set localversion argument value')
723
721
724 def cachekeyfn(repo, proto, cacher, **args):
722 def cachekeyfn(repo, proto, cacher, **args):
725 spec = COMMANDS[command]
723 spec = COMMANDS[command]
726
724
727 # Commands that mutate the repo can not be cached.
725 # Commands that mutate the repo can not be cached.
728 if spec.permission == 'push':
726 if spec.permission == 'push':
729 return None
727 return None
730
728
731 # TODO config option to disable caching.
729 # TODO config option to disable caching.
732
730
733 # Our key derivation strategy is to construct a data structure
731 # Our key derivation strategy is to construct a data structure
734 # holding everything that could influence cacheability and to hash
732 # holding everything that could influence cacheability and to hash
735 # the CBOR representation of that. Using CBOR seems like it might
733 # the CBOR representation of that. Using CBOR seems like it might
736 # be overkill. However, simpler hashing mechanisms are prone to
734 # be overkill. However, simpler hashing mechanisms are prone to
737 # duplicate input issues. e.g. if you just concatenate two values,
735 # duplicate input issues. e.g. if you just concatenate two values,
738 # "foo"+"bar" is identical to "fo"+"obar". Using CBOR provides
736 # "foo"+"bar" is identical to "fo"+"obar". Using CBOR provides
739 # "padding" between values and prevents these problems.
737 # "padding" between values and prevents these problems.
740
738
741 # Seed the hash with various data.
739 # Seed the hash with various data.
742 state = {
740 state = {
743 # To invalidate all cache keys.
741 # To invalidate all cache keys.
744 b'globalversion': GLOBAL_CACHE_VERSION,
742 b'globalversion': GLOBAL_CACHE_VERSION,
745 # More granular cache key invalidation.
743 # More granular cache key invalidation.
746 b'localversion': localversion,
744 b'localversion': localversion,
747 # Cache keys are segmented by command.
745 # Cache keys are segmented by command.
748 b'command': command,
746 b'command': command,
749 # Throw in the media type and API version strings so changes
747 # Throw in the media type and API version strings so changes
750 # to exchange semantics invalid cache.
748 # to exchange semantics invalid cache.
751 b'mediatype': FRAMINGTYPE,
749 b'mediatype': FRAMINGTYPE,
752 b'version': HTTP_WIREPROTO_V2,
750 b'version': HTTP_WIREPROTO_V2,
753 # So same requests for different repos don't share cache keys.
751 # So same requests for different repos don't share cache keys.
754 b'repo': repo.root,
752 b'repo': repo.root,
755 }
753 }
756
754
757 # The arguments passed to us will have already been normalized.
755 # The arguments passed to us will have already been normalized.
758 # Default values will be set, etc. This is important because it
756 # Default values will be set, etc. This is important because it
759 # means that it doesn't matter if clients send an explicit argument
757 # means that it doesn't matter if clients send an explicit argument
760 # or rely on the default value: it will all normalize to the same
758 # or rely on the default value: it will all normalize to the same
761 # set of arguments on the server and therefore the same cache key.
759 # set of arguments on the server and therefore the same cache key.
762 #
760 #
763 # Arguments by their very nature must support being encoded to CBOR.
761 # Arguments by their very nature must support being encoded to CBOR.
764 # And the CBOR encoder is deterministic. So we hash the arguments
762 # And the CBOR encoder is deterministic. So we hash the arguments
765 # by feeding the CBOR of their representation into the hasher.
763 # by feeding the CBOR of their representation into the hasher.
766 if allargs:
764 if allargs:
767 state[b'args'] = pycompat.byteskwargs(args)
765 state[b'args'] = pycompat.byteskwargs(args)
768
766
769 cacher.adjustcachekeystate(state)
767 cacher.adjustcachekeystate(state)
770
768
771 hasher = hashlib.sha1()
769 hasher = hashlib.sha1()
772 for chunk in cborutil.streamencode(state):
770 for chunk in cborutil.streamencode(state):
773 hasher.update(chunk)
771 hasher.update(chunk)
774
772
775 return pycompat.sysbytes(hasher.hexdigest())
773 return pycompat.sysbytes(hasher.hexdigest())
776
774
777 return cachekeyfn
775 return cachekeyfn
778
776
779 def makeresponsecacher(repo, proto, command, args, objencoderfn,
777 def makeresponsecacher(repo, proto, command, args, objencoderfn,
780 redirecttargets, redirecthashes):
778 redirecttargets, redirecthashes):
781 """Construct a cacher for a cacheable command.
779 """Construct a cacher for a cacheable command.
782
780
783 Returns an ``iwireprotocolcommandcacher`` instance.
781 Returns an ``iwireprotocolcommandcacher`` instance.
784
782
785 Extensions can monkeypatch this function to provide custom caching
783 Extensions can monkeypatch this function to provide custom caching
786 backends.
784 backends.
787 """
785 """
788 return None
786 return None
789
787
790 def resolvenodes(repo, revisions):
788 def resolvenodes(repo, revisions):
791 """Resolve nodes from a revisions specifier data structure."""
789 """Resolve nodes from a revisions specifier data structure."""
792 cl = repo.changelog
790 cl = repo.changelog
793 clhasnode = cl.hasnode
791 clhasnode = cl.hasnode
794
792
795 seen = set()
793 seen = set()
796 nodes = []
794 nodes = []
797
795
798 if not isinstance(revisions, list):
796 if not isinstance(revisions, list):
799 raise error.WireprotoCommandError('revisions must be defined as an '
797 raise error.WireprotoCommandError('revisions must be defined as an '
800 'array')
798 'array')
801
799
802 for spec in revisions:
800 for spec in revisions:
803 if b'type' not in spec:
801 if b'type' not in spec:
804 raise error.WireprotoCommandError(
802 raise error.WireprotoCommandError(
805 'type key not present in revision specifier')
803 'type key not present in revision specifier')
806
804
807 typ = spec[b'type']
805 typ = spec[b'type']
808
806
809 if typ == b'changesetexplicit':
807 if typ == b'changesetexplicit':
810 if b'nodes' not in spec:
808 if b'nodes' not in spec:
811 raise error.WireprotoCommandError(
809 raise error.WireprotoCommandError(
812 'nodes key not present in changesetexplicit revision '
810 'nodes key not present in changesetexplicit revision '
813 'specifier')
811 'specifier')
814
812
815 for node in spec[b'nodes']:
813 for node in spec[b'nodes']:
816 if node not in seen:
814 if node not in seen:
817 nodes.append(node)
815 nodes.append(node)
818 seen.add(node)
816 seen.add(node)
819
817
820 elif typ == b'changesetexplicitdepth':
818 elif typ == b'changesetexplicitdepth':
821 for key in (b'nodes', b'depth'):
819 for key in (b'nodes', b'depth'):
822 if key not in spec:
820 if key not in spec:
823 raise error.WireprotoCommandError(
821 raise error.WireprotoCommandError(
824 '%s key not present in changesetexplicitdepth revision '
822 '%s key not present in changesetexplicitdepth revision '
825 'specifier', (key,))
823 'specifier', (key,))
826
824
827 for rev in repo.revs(b'ancestors(%ln, %s)', spec[b'nodes'],
825 for rev in repo.revs(b'ancestors(%ln, %s)', spec[b'nodes'],
828 spec[b'depth'] - 1):
826 spec[b'depth'] - 1):
829 node = cl.node(rev)
827 node = cl.node(rev)
830
828
831 if node not in seen:
829 if node not in seen:
832 nodes.append(node)
830 nodes.append(node)
833 seen.add(node)
831 seen.add(node)
834
832
835 elif typ == b'changesetdagrange':
833 elif typ == b'changesetdagrange':
836 for key in (b'roots', b'heads'):
834 for key in (b'roots', b'heads'):
837 if key not in spec:
835 if key not in spec:
838 raise error.WireprotoCommandError(
836 raise error.WireprotoCommandError(
839 '%s key not present in changesetdagrange revision '
837 '%s key not present in changesetdagrange revision '
840 'specifier', (key,))
838 'specifier', (key,))
841
839
842 if not spec[b'heads']:
840 if not spec[b'heads']:
843 raise error.WireprotoCommandError(
841 raise error.WireprotoCommandError(
844 'heads key in changesetdagrange cannot be empty')
842 'heads key in changesetdagrange cannot be empty')
845
843
846 if spec[b'roots']:
844 if spec[b'roots']:
847 common = [n for n in spec[b'roots'] if clhasnode(n)]
845 common = [n for n in spec[b'roots'] if clhasnode(n)]
848 else:
846 else:
849 common = [nullid]
847 common = [nullid]
850
848
851 for n in discovery.outgoing(repo, common, spec[b'heads']).missing:
849 for n in discovery.outgoing(repo, common, spec[b'heads']).missing:
852 if n not in seen:
850 if n not in seen:
853 nodes.append(n)
851 nodes.append(n)
854 seen.add(n)
852 seen.add(n)
855
853
856 else:
854 else:
857 raise error.WireprotoCommandError(
855 raise error.WireprotoCommandError(
858 'unknown revision specifier type: %s', (typ,))
856 'unknown revision specifier type: %s', (typ,))
859
857
860 return nodes
858 return nodes
861
859
@wireprotocommand('branchmap', permission='pull')
def branchmapv2(repo, proto):
    yield {encoding.fromlocal(k): v
           for k, v in repo.branchmap().iteritems()}

@wireprotocommand('capabilities', permission='pull')
def capabilitiesv2(repo, proto):
    yield _capabilitiesv2(repo, proto)

@wireprotocommand(
    'changesetdata',
    args={
        'revisions': {
            'type': 'list',
            'example': [{
                b'type': b'changesetexplicit',
                b'nodes': [b'abcdef...'],
            }],
        },
        'fields': {
            'type': 'set',
            'default': set,
            'example': {b'parents', b'revision'},
            'validvalues': {b'bookmarks', b'parents', b'phase', b'revision'},
        },
    },
    permission='pull')
def changesetdata(repo, proto, revisions, fields):
    # TODO look for unknown fields and abort when they can't be serviced.
    # This could probably be validated by dispatcher using validvalues.

    cl = repo.changelog
    outgoing = resolvenodes(repo, revisions)
    publishing = repo.publishing()

    if outgoing:
        repo.hook('preoutgoing', throw=True, source='serve')

    yield {
        b'totalitems': len(outgoing),
    }

    # The phases of nodes already transferred to the client may have changed
    # since the client last requested data. We send phase-only records
    # for these revisions, if requested.
    # TODO actually do this. We'll probably want to emit phase heads
    # in the ancestry set of the outgoing revisions. This will ensure
    # that phase updates within that set are seen.
    if b'phase' in fields:
        pass

    nodebookmarks = {}
    for mark, node in repo._bookmarks.items():
        nodebookmarks.setdefault(node, set()).add(mark)

    # It is already topologically sorted by revision number.
    for node in outgoing:
        d = {
            b'node': node,
        }

        if b'parents' in fields:
            d[b'parents'] = cl.parents(node)

        if b'phase' in fields:
            if publishing:
                d[b'phase'] = b'public'
            else:
                ctx = repo[node]
                d[b'phase'] = ctx.phasestr()

        if b'bookmarks' in fields and node in nodebookmarks:
            d[b'bookmarks'] = sorted(nodebookmarks[node])
            del nodebookmarks[node]

        followingmeta = []
        followingdata = []

        if b'revision' in fields:
            revisiondata = cl.revision(node, raw=True)
            followingmeta.append((b'revision', len(revisiondata)))
            followingdata.append(revisiondata)

        # TODO make it possible for extensions to wrap a function or register
        # a handler to service custom fields.

        if followingmeta:
            d[b'fieldsfollowing'] = followingmeta

        yield d

        for extra in followingdata:
            yield extra

    # If requested, send bookmarks from nodes that didn't have revision
    # data sent so receiver is aware of any bookmark updates.
    if b'bookmarks' in fields:
        for node, marks in sorted(nodebookmarks.iteritems()):
            yield {
                b'node': node,
                b'bookmarks': sorted(marks),
            }

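# Illustrative sketch (not part of the original module): the objects
# changesetdata() yields for a request with fields={b'parents', b'revision'}
# and a single matching node. All values are placeholders; the raw changelog
# revision bytes follow the dict that announces them via b'fieldsfollowing'.
_EXAMPLE_CHANGESETDATA_STREAM = [
    {b'totalitems': 1},
    {
        b'node': b'<binary node>',
        b'parents': (b'<p1 node>', b'<p2 node>'),
        b'fieldsfollowing': [(b'revision', 1234)],
    },
    b'<1234 bytes of raw changelog revision data>',
]
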
class FileAccessError(Exception):
    """Represents an error accessing a specific file."""

    def __init__(self, path, msg, args):
        self.path = path
        self.msg = msg
        self.args = args

def getfilestore(repo, proto, path):
    """Obtain a file storage object for use with wire protocol.

    Exists as a standalone function so extensions can monkeypatch to add
    access control.
    """
    # This seems to work even if the file doesn't exist. So catch
    # "empty" files and return an error.
    fl = repo.file(path)

    if not len(fl):
        raise FileAccessError(path, 'unknown file: %s', (path,))

    return fl

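# Illustrative sketch (not part of the original module): per the docstring
# above, an extension can monkeypatch getfilestore() to enforce access
# control. The wrapper below is hypothetical - the b'secret/' policy is
# invented for illustration - and an extension would typically install it
# with extensions.wrapfunction().
def _examplerestrictedfilestore(orig, repo, proto, path):
    # Reject paths under a forbidden prefix before delegating to the
    # original implementation.
    if path.startswith(b'secret/'):
        raise FileAccessError(path, 'access to %s is forbidden', (path,))
    return orig(repo, proto, path)
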
def emitfilerevisions(repo, path, revisions, linknodes, fields):
    for revision in revisions:
        d = {
            b'node': revision.node,
        }

        if b'parents' in fields:
            d[b'parents'] = [revision.p1node, revision.p2node]

        if b'linknode' in fields:
            d[b'linknode'] = linknodes[revision.node]

        followingmeta = []
        followingdata = []

        if b'revision' in fields:
            if revision.revision is not None:
                followingmeta.append((b'revision', len(revision.revision)))
                followingdata.append(revision.revision)
            else:
                d[b'deltabasenode'] = revision.basenode
                followingmeta.append((b'delta', len(revision.delta)))
                followingdata.append(revision.delta)

        if followingmeta:
            d[b'fieldsfollowing'] = followingmeta

        yield d

        for extra in followingdata:
            yield extra

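# Illustrative sketch (not part of the original module): the two record
# shapes emitfilerevisions() can yield for a file revision when b'revision'
# is requested. Values are placeholders. A fulltext announces
# (b'revision', size); a delta announces its base node plus (b'delta', size).
# In either case the raw bytes follow the dict as a separate object.
_EXAMPLE_FILEREVISION_RECORDS = [
    {
        b'node': b'<file node>',
        b'fieldsfollowing': [(b'revision', 512)],
    },
    {
        b'node': b'<file node>',
        b'deltabasenode': b'<base file node>',
        b'fieldsfollowing': [(b'delta', 64)],
    },
]
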
def makefilematcher(repo, pathfilter):
    """Construct a matcher from a path filter dict."""

    # Validate values.
    if pathfilter:
        for key in (b'include', b'exclude'):
            for pattern in pathfilter.get(key, []):
                if not pattern.startswith((b'path:', b'rootfilesin:')):
                    raise error.WireprotoCommandError(
                        '%s pattern must begin with `path:` or `rootfilesin:`; '
                        'got %s', (key, pattern))

    if pathfilter:
        matcher = matchmod.match(repo.root, b'',
                                 include=pathfilter.get(b'include', []),
                                 exclude=pathfilter.get(b'exclude', []))
    else:
        matcher = matchmod.match(repo.root, b'')

    # Requested patterns could include files not in the local store. So
    # filter those out.
    return repo.narrowmatch(matcher)

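# Illustrative sketch (not part of the original module): a path filter dict
# accepted by makefilematcher(). Patterns must use the b'path:' or
# b'rootfilesin:' prefixes enforced above; the directory names here are
# placeholders.
_EXAMPLE_PATHFILTER = {
    b'include': [b'path:tests', b'rootfilesin:docs'],
    b'exclude': [b'path:tests/fixtures'],
}
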
@wireprotocommand(
    'filedata',
    args={
        'haveparents': {
            'type': 'bool',
            'default': lambda: False,
            'example': True,
        },
        'nodes': {
            'type': 'list',
            'example': [b'0123456...'],
        },
        'fields': {
            'type': 'set',
            'default': set,
            'example': {b'parents', b'revision'},
            'validvalues': {b'parents', b'revision', b'linknode'},
        },
        'path': {
            'type': 'bytes',
            'example': b'foo.txt',
        }
    },
    permission='pull',
    # TODO censoring a file revision won't invalidate the cache.
    # Figure out a way to take censoring into account when deriving
    # the cache key.
    cachekeyfn=makecommandcachekeyfn('filedata', 1, allargs=True))
def filedata(repo, proto, haveparents, nodes, fields, path):
    # TODO this API allows access to file revisions that are attached to
    # secret changesets. filesdata does not have this problem. Maybe this
    # API should be deleted?

    try:
        # Extensions may wish to access the protocol handler.
        store = getfilestore(repo, proto, path)
    except FileAccessError as e:
        raise error.WireprotoCommandError(e.msg, e.args)

    clnode = repo.changelog.node
    linknodes = {}

    # Validate requested nodes.
    for node in nodes:
        try:
            store.rev(node)
        except error.LookupError:
            raise error.WireprotoCommandError('unknown file node: %s',
                                              (hex(node),))

        # TODO by creating the filectx against a specific file revision
        # instead of changeset, linkrev() is always used. This is wrong for
        # cases where linkrev() may refer to a hidden changeset. But since this
        # API doesn't know anything about changesets, we're not sure how to
        # disambiguate the linknode. Perhaps we should delete this API?
        fctx = repo.filectx(path, fileid=node)
        linknodes[node] = clnode(fctx.introrev())

    revisions = store.emitrevisions(nodes,
                                    revisiondata=b'revision' in fields,
                                    assumehaveparentrevisions=haveparents)

    yield {
        b'totalitems': len(nodes),
    }

    for o in emitfilerevisions(repo, path, revisions, linknodes, fields):
        yield o

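# Illustrative sketch (not part of the original module): arguments for a
# filedata request asking for parents and revision data of one revision of a
# single file. The node value is a placeholder; keys mirror the args spec
# above.
_EXAMPLE_FILEDATA_ARGS = {
    b'path': b'foo.txt',
    b'nodes': [b'<binary file node>'],
    b'fields': {b'parents', b'revision'},
    b'haveparents': True,
}
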
def filesdatacapabilities(repo, proto):
    batchsize = repo.ui.configint(
        b'experimental', b'server.filesdata.recommended-batch-size')
    return {
        b'recommendedbatchsize': batchsize,
    }

@wireprotocommand(
    'filesdata',
    args={
        'haveparents': {
            'type': 'bool',
            'default': lambda: False,
            'example': True,
        },
        'fields': {
            'type': 'set',
            'default': set,
            'example': {b'parents', b'revision'},
            'validvalues': {b'firstchangeset', b'linknode', b'parents',
                            b'revision'},
        },
        'pathfilter': {
            'type': 'dict',
            'default': lambda: None,
            'example': {b'include': [b'path:tests']},
        },
        'revisions': {
            'type': 'list',
            'example': [{
                b'type': b'changesetexplicit',
                b'nodes': [b'abcdef...'],
            }],
        },
    },
    permission='pull',
    # TODO censoring a file revision won't invalidate the cache.
    # Figure out a way to take censoring into account when deriving
    # the cache key.
    cachekeyfn=makecommandcachekeyfn('filesdata', 1, allargs=True),
    extracapabilitiesfn=filesdatacapabilities)
def filesdata(repo, proto, haveparents, fields, pathfilter, revisions):
    # TODO This should operate on a repo that exposes obsolete changesets. There
    # is a race between a client making a push that obsoletes a changeset and
    # another client fetching files data for that changeset. If a client has a
    # changeset, it should probably be allowed to access files data for that
    # changeset.

    outgoing = resolvenodes(repo, revisions)
    filematcher = makefilematcher(repo, pathfilter)

    # path -> {fnode: linknode}
    fnodes = collections.defaultdict(dict)

    # We collect the set of relevant file revisions by iterating the changeset
    # revisions and either walking the set of files recorded in the changeset
    # or by walking the manifest at that revision. There is probably room for a
    # storage-level API to request this data, as it can be expensive to compute
    # and would benefit from caching or alternate storage from what revlogs
    # provide.
    for node in outgoing:
        ctx = repo[node]
        mctx = ctx.manifestctx()
        md = mctx.read()

        if haveparents:
            checkpaths = ctx.files()
        else:
            checkpaths = md.keys()

        for path in checkpaths:
            fnode = md[path]

            if path in fnodes and fnode in fnodes[path]:
                continue

            if not filematcher(path):
                continue

            fnodes[path].setdefault(fnode, node)

    yield {
        b'totalpaths': len(fnodes),
        b'totalitems': sum(len(v) for v in fnodes.values())
    }

    for path, filenodes in sorted(fnodes.items()):
        try:
            store = getfilestore(repo, proto, path)
        except FileAccessError as e:
            raise error.WireprotoCommandError(e.msg, e.args)

        yield {
            b'path': path,
            b'totalitems': len(filenodes),
        }

        revisions = store.emitrevisions(filenodes.keys(),
                                        revisiondata=b'revision' in fields,
                                        assumehaveparentrevisions=haveparents)

        for o in emitfilerevisions(repo, path, revisions, filenodes, fields):
            yield o

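# Illustrative sketch (not part of the original module): arguments for a
# filesdata request that fetches file revisions for an explicit changeset
# while restricting the paths via a filter. Node values and paths are
# placeholders; keys mirror the args spec above.
_EXAMPLE_FILESDATA_ARGS = {
    b'revisions': [{
        b'type': b'changesetexplicit',
        b'nodes': [b'<binary changeset node>'],
    }],
    b'fields': {b'parents', b'revision', b'linknode'},
    b'pathfilter': {b'include': [b'path:tests']},
    b'haveparents': False,
}
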
@wireprotocommand(
    'heads',
    args={
        'publiconly': {
            'type': 'bool',
            'default': lambda: False,
            'example': False,
        },
    },
    permission='pull')
def headsv2(repo, proto, publiconly):
    if publiconly:
        repo = repo.filtered('immutable')

    yield repo.heads()

@wireprotocommand(
    'known',
    args={
        'nodes': {
            'type': 'list',
            'default': list,
            'example': [b'deadbeef'],
        },
    },
    permission='pull')
def knownv2(repo, proto, nodes):
    result = b''.join(b'1' if n else b'0' for n in repo.known(nodes))
    yield result

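# Illustrative sketch (not part of the original module): knownv2() encodes
# membership as one ASCII digit per requested node, in request order. For
# three nodes where only the second is unknown, the yielded result would be:
_EXAMPLE_KNOWN_RESULT = b'101'
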
@wireprotocommand(
    'listkeys',
    args={
        'namespace': {
            'type': 'bytes',
            'example': b'ns',
        },
    },
    permission='pull')
def listkeysv2(repo, proto, namespace):
    keys = repo.listkeys(encoding.tolocal(namespace))
    keys = {encoding.fromlocal(k): encoding.fromlocal(v)
            for k, v in keys.iteritems()}

    yield keys

@wireprotocommand(
    'lookup',
    args={
        'key': {
            'type': 'bytes',
            'example': b'foo',
        },
    },
    permission='pull')
def lookupv2(repo, proto, key):
    key = encoding.tolocal(key)

    # TODO handle exception.
    node = repo.lookup(key)

    yield node

def manifestdatacapabilities(repo, proto):
    batchsize = repo.ui.configint(
        b'experimental', b'server.manifestdata.recommended-batch-size')

    return {
        b'recommendedbatchsize': batchsize,
    }

@wireprotocommand(
    'manifestdata',
    args={
        'nodes': {
            'type': 'list',
            'example': [b'0123456...'],
        },
        'haveparents': {
            'type': 'bool',
            'default': lambda: False,
            'example': True,
        },
        'fields': {
            'type': 'set',
            'default': set,
            'example': {b'parents', b'revision'},
            'validvalues': {b'parents', b'revision'},
        },
        'tree': {
            'type': 'bytes',
            'example': b'',
        },
    },
    permission='pull',
    cachekeyfn=makecommandcachekeyfn('manifestdata', 1, allargs=True),
    extracapabilitiesfn=manifestdatacapabilities)
def manifestdata(repo, proto, haveparents, nodes, fields, tree):
    store = repo.manifestlog.getstorage(tree)

    # Validate the node is known and abort on unknown revisions.
    for node in nodes:
        try:
            store.rev(node)
        except error.LookupError:
            raise error.WireprotoCommandError(
                'unknown node: %s', (node,))

    revisions = store.emitrevisions(nodes,
                                    revisiondata=b'revision' in fields,
                                    assumehaveparentrevisions=haveparents)

    yield {
        b'totalitems': len(nodes),
    }

    for revision in revisions:
        d = {
            b'node': revision.node,
        }

        if b'parents' in fields:
            d[b'parents'] = [revision.p1node, revision.p2node]

        followingmeta = []
        followingdata = []

        if b'revision' in fields:
            if revision.revision is not None:
                followingmeta.append((b'revision', len(revision.revision)))
                followingdata.append(revision.revision)
            else:
                d[b'deltabasenode'] = revision.basenode
                followingmeta.append((b'delta', len(revision.delta)))
                followingdata.append(revision.delta)

        if followingmeta:
            d[b'fieldsfollowing'] = followingmeta

        yield d

        for extra in followingdata:
            yield extra

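# Illustrative sketch (not part of the original module): arguments for a
# manifestdata request against the root manifest (tree=b'') asking for
# parents and revision/delta data. The node value is a placeholder; keys
# mirror the args spec above.
_EXAMPLE_MANIFESTDATA_ARGS = {
    b'tree': b'',
    b'nodes': [b'<binary manifest node>'],
    b'fields': {b'parents', b'revision'},
    b'haveparents': True,
}
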
@wireprotocommand(
    'pushkey',
    args={
        'namespace': {
            'type': 'bytes',
            'example': b'ns',
        },
        'key': {
            'type': 'bytes',
            'example': b'key',
        },
        'old': {
            'type': 'bytes',
            'example': b'old',
        },
        'new': {
            'type': 'bytes',
            'example': 'new',
        },
    },
    permission='push')
def pushkeyv2(repo, proto, namespace, key, old, new):
    # TODO handle ui output redirection
    yield repo.pushkey(encoding.tolocal(namespace),
                       encoding.tolocal(key),
                       encoding.tolocal(old),
                       encoding.tolocal(new))


@wireprotocommand(
    'rawstorefiledata',
    args={
        'files': {
            'type': 'list',
            'example': [b'changelog', b'manifestlog'],
        },
        'pathfilter': {
            'type': 'list',
            'default': lambda: None,
            'example': {b'include': [b'path:tests']},
        },
    },
    permission='pull')
def rawstorefiledata(repo, proto, files, pathfilter):
    if not streamclone.allowservergeneration(repo):
        raise error.WireprotoCommandError(b'stream clone is disabled')

    # TODO support dynamically advertising what store files "sets" are
    # available. For now, we support changelog, manifestlog, and files.
    files = set(files)
    allowedfiles = {b'changelog', b'manifestlog'}

    unsupported = files - allowedfiles
    if unsupported:
        raise error.WireprotoCommandError(b'unknown file type: %s',
                                          (b', '.join(sorted(unsupported)),))

    with repo.lock():
        topfiles = list(repo.store.topfiles())

    sendfiles = []
    totalsize = 0

    # TODO this is a bunch of storage layer interface abstractions because
    # it assumes revlogs.
    for name, encodedname, size in topfiles:
        if b'changelog' in files and name.startswith(b'00changelog'):
            pass
        elif b'manifestlog' in files and name.startswith(b'00manifest'):
            pass
        else:
            continue

        sendfiles.append((b'store', name, size))
        totalsize += size

    yield {
        b'filecount': len(sendfiles),
        b'totalsize': totalsize,
    }

    for location, name, size in sendfiles:
        yield {
            b'location': location,
            b'path': name,
            b'size': size,
        }

        # We have to use a closure for this to ensure the context manager is
        # closed only after sending the final chunk.
        def getfiledata():
            with repo.svfs(name, 'rb', auditpath=False) as fh:
                for chunk in util.filechunkiter(fh, limit=size):
                    yield chunk

        yield wireprototypes.indefinitebytestringresponse(
            getfiledata())
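
# Illustrative sketch (not part of the original module): the objects
# rawstorefiledata() yields for a request covering only the changelog. The
# store file name, sizes, and contents are placeholders; each
# location/path/size dict is followed by an indefinite-length bytestring
# response carrying the raw store file, streamed in chunks.
_EXAMPLE_RAWSTOREFILEDATA_STREAM = [
    {b'filecount': 1, b'totalsize': 4096},
    {b'location': b'store', b'path': b'00changelog.i', b'size': 4096},
    b'<4096 bytes of raw store file data>',
]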