##// END OF EJS Templates
wireprotov2server: use pycompat.strkwargs when calling cachekeyfn...
Gregory Szorc -
r41421:56fcbac6 default
parent child Browse files
Show More
@@ -1,1455 +1,1456 b''
1 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
1 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
2 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
2 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 #
3 #
4 # This software may be used and distributed according to the terms of the
4 # This software may be used and distributed according to the terms of the
5 # GNU General Public License version 2 or any later version.
5 # GNU General Public License version 2 or any later version.
6
6
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import collections
9 import collections
10 import contextlib
10 import contextlib
11 import hashlib
11 import hashlib
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullid,
16 nullid,
17 )
17 )
18 from . import (
18 from . import (
19 discovery,
19 discovery,
20 encoding,
20 encoding,
21 error,
21 error,
22 match as matchmod,
22 match as matchmod,
23 narrowspec,
23 narrowspec,
24 pycompat,
24 pycompat,
25 streamclone,
25 streamclone,
26 util,
26 util,
27 wireprotoframing,
27 wireprotoframing,
28 wireprototypes,
28 wireprototypes,
29 )
29 )
30 from .utils import (
30 from .utils import (
31 cborutil,
31 cborutil,
32 interfaceutil,
32 interfaceutil,
33 stringutil,
33 stringutil,
34 )
34 )
35
35
36 FRAMINGTYPE = b'application/mercurial-exp-framing-0006'
36 FRAMINGTYPE = b'application/mercurial-exp-framing-0006'
37
37
38 HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2
38 HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2
39
39
40 COMMANDS = wireprototypes.commanddict()
40 COMMANDS = wireprototypes.commanddict()
41
41
42 # Value inserted into cache key computation function. Change the value to
42 # Value inserted into cache key computation function. Change the value to
43 # force new cache keys for every command request. This should be done when
43 # force new cache keys for every command request. This should be done when
44 # there is a change to how caching works, etc.
44 # there is a change to how caching works, etc.
45 GLOBAL_CACHE_VERSION = 1
45 GLOBAL_CACHE_VERSION = 1
46
46
def handlehttpv2request(rctx, req, res, checkperm, urlparts):
    """Entry point for requests into the HTTP version 2 API URL space.

    ``urlparts`` is the request path below the API root, already split into
    components. The URL space looks like ``<permissions>/<command>``, where
    ``<permissions>`` is ``ro`` or ``rw`` to signal read-only or read-write
    access respectively.

    Validates the URL layout, HTTP method, granted permissions, and framing
    media types before handing off to ``_processhttpv2request``. Every
    failure short-circuits with a plain text response on ``res``.
    """
    from .hgweb import common as hgwebcommon

    def plaintext(status, body):
        # Uniform helper for the many early-exit plain text responses.
        res.status = status
        res.headers[b'Content-Type'] = b'text/plain'
        res.setbodybytes(body)

    # The API root itself does nothing meaningful... yet.
    if not urlparts:
        plaintext(b'200 OK', _('HTTP version 2 API handler'))
        return

    # A single path component cannot name a <permission>/<command> pair.
    if len(urlparts) == 1:
        plaintext(b'404 Not Found',
                  _('do not know how to process %s\n') % req.dispatchpath)
        return

    permission, command = urlparts[0:2]

    if permission not in (b'ro', b'rw'):
        plaintext(b'404 Not Found',
                  _('unknown permission: %s') % permission)
        return

    if req.method != 'POST':
        # This response advertises ``Allow`` rather than a content type.
        res.status = b'405 Method Not Allowed'
        res.headers[b'Allow'] = b'POST'
        res.setbodybytes(_('commands require POST requests'))
        return

    # At some point we'll want to use our own API instead of recycling the
    # behavior of version 1 of the wire protocol...
    # TODO return reasonable responses - not responses that overload the
    # HTTP status line message for error reporting.
    try:
        checkperm(rctx, req, 'pull' if permission == b'ro' else 'push')
    except hgwebcommon.ErrorResponse as e:
        res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
        for k, v in e.headers:
            res.headers[k] = v
        res.setbodybytes('permission denied')
        return

    # We have a special endpoint to reflect the request back at the client.
    if command == b'debugreflect':
        _processhttpv2reflectrequest(rctx.repo.ui, rctx.repo, req, res)
        return

    # Extra commands that we handle that aren't really wire protocol
    # commands. Think extra hard before making this hackery available to
    # extension.
    extracommands = {'multirequest'}

    if command not in COMMANDS and command not in extracommands:
        plaintext(b'404 Not Found',
                  _('unknown wire protocol command: %s\n') % command)
        return

    repo = rctx.repo
    ui = repo.ui

    proto = httpv2protocolhandler(req, ui)

    if (not COMMANDS.commandavailable(command, proto)
        and command not in extracommands):
        plaintext(b'404 Not Found',
                  _('invalid wire protocol command: %s') % command)
        return

    # TODO consider cases where proxies may add additional Accept headers.
    if req.headers.get(b'Accept') != FRAMINGTYPE:
        plaintext(b'406 Not Acceptable',
                  _('client MUST specify Accept header with value: %s\n')
                  % FRAMINGTYPE)
        return

    if req.headers.get(b'Content-Type') != FRAMINGTYPE:
        # TODO we should send a response with appropriate media type,
        # since client does Accept it.
        plaintext(b'415 Unsupported Media Type',
                  _('client MUST send Content-Type header with '
                    'value: %s\n') % FRAMINGTYPE)
        return

    _processhttpv2request(ui, repo, req, res, permission, command, proto)
def _processhttpv2reflectrequest(ui, repo, req, res):
    """Reads unified frame protocol request and dumps out state to client.

    This special endpoint can be used to help debug the wire protocol.

    Instead of routing the request through the normal dispatch mechanism,
    we instead read all frames, decode them, and feed them into our state
    tracker. We then dump the log of all that activity back out to the
    client.
    """
    import json

    # Reflection APIs have a history of being abused, accidentally disclosing
    # sensitive data, etc. So we have a config knob.
    if not ui.configbool('experimental', 'web.api.debugreflect'):
        res.status = b'404 Not Found'
        res.headers[b'Content-Type'] = b'text/plain'
        res.setbodybytes(_('debugreflect service not available'))
        return

    # We assume we have a unified framing protocol request body.

    reactor = wireprotoframing.serverreactor(ui)
    # Log lines accumulated across the whole exchange; one entry per received
    # frame plus one per reactor reaction, emitted joined by newlines below.
    states = []

    while True:
        frame = wireprotoframing.readframe(req.bodyfh)

        # End of request body: record that and stop reading.
        if not frame:
            states.append(b'received: <no frame>')
            break

        states.append(b'received: %d %d %d %s' % (frame.typeid, frame.flags,
                                                  frame.requestid,
                                                  frame.payload))

        # Feed the frame to the reactor and record its (action, meta)
        # reaction as deterministic JSON (sorted keys, fixed separators).
        action, meta = reactor.onframerecv(frame)
        states.append(json.dumps((action, meta), sort_keys=True,
                                 separators=(', ', ': ')))

    # Also record how the reactor reacts to end of input.
    action, meta = reactor.oninputeof()
    meta['action'] = action
    states.append(json.dumps(meta, sort_keys=True, separators=(', ',': ')))

    res.status = b'200 OK'
    res.headers[b'Content-Type'] = b'text/plain'
    res.setbodybytes(b'\n'.join(states))
def _processhttpv2request(ui, repo, req, res, authedperm, reqcommand, proto):
    """Post-validation handler for HTTPv2 requests.

    Called when the HTTP request contains unified frame-based protocol
    frames for evaluation.

    ``authedperm`` is the permission (``b'ro'`` or ``b'rw'``) already granted
    by URL checking; ``reqcommand`` is the command named in the URL.
    """
    # TODO Some HTTP clients are full duplex and can receive data before
    # the entire request is transmitted. Figure out a way to indicate support
    # for that so we can opt into full duplex mode.
    reactor = wireprotoframing.serverreactor(ui, deferoutput=True)
    # Tracks whether a command has already run, so multi-command rules can
    # be enforced by _httpv2runcommand via ``issubsequent``.
    seencommand = False

    outstream = None

    # Read frames until the request body is exhausted, feeding each into the
    # reactor state machine and acting on what it tells us to do.
    while True:
        frame = wireprotoframing.readframe(req.bodyfh)
        if not frame:
            break

        action, meta = reactor.onframerecv(frame)

        if action == 'wantframe':
            # Need more data before we can do anything.
            continue
        elif action == 'runcommand':
            # Defer creating output stream because we need to wait for
            # protocol settings frames so proper encoding can be applied.
            if not outstream:
                outstream = reactor.makeoutputstream()

            sentoutput = _httpv2runcommand(ui, repo, req, res, authedperm,
                                           reqcommand, reactor, outstream,
                                           meta, issubsequent=seencommand)

            # The command handler fully populated the response; we're done.
            if sentoutput:
                return

            seencommand = True

        elif action == 'error':
            # TODO define proper error mechanism.
            res.status = b'200 OK'
            res.headers[b'Content-Type'] = b'text/plain'
            res.setbodybytes(meta['message'] + b'\n')
            return
        else:
            raise error.ProgrammingError(
                'unhandled action from frame processor: %s' % action)

    # Input exhausted without a response being sent; the reactor may still
    # have buffered output frames to emit (deferoutput=True above).
    action, meta = reactor.oninputeof()
    if action == 'sendframes':
        # We assume we haven't started sending the response yet. If we're
        # wrong, the response type will raise an exception.
        res.status = b'200 OK'
        res.headers[b'Content-Type'] = FRAMINGTYPE
        res.setbodygen(meta['framegen'])
    elif action == 'noop':
        pass
    else:
        raise error.ProgrammingError('unhandled action from frame processor: %s'
                                     % action)
def _httpv2runcommand(ui, repo, req, res, authedperm, reqcommand, reactor,
                      outstream, command, issubsequent):
    """Dispatch a wire protocol command made from HTTPv2 requests.

    The authenticated permission (``authedperm``) along with the original
    command from the URL (``reqcommand``) are passed in.

    ``command`` is the decoded command request from the frame reactor; the
    keys used here are ``'command'``, ``'args'``, ``'redirect'`` and
    ``'requestid'``. ``issubsequent`` is True when a command has already run
    on this HTTP request.

    Returns True when the response on ``res`` has been fully populated and
    the caller should stop processing; False when the caller may continue
    (output was deferred via the reactor).
    """
    # We already validated that the session has permissions to perform the
    # actions in ``authedperm``. In the unified frame protocol, the canonical
    # command to run is expressed in a frame. However, the URL also requested
    # to run a specific command. We need to be careful that the command we
    # run doesn't have permissions requirements greater than what was granted
    # by ``authedperm``.
    #
    # Our rule for this is we only allow one command per HTTP request and
    # that command must match the command in the URL. However, we make
    # an exception for the ``multirequest`` URL. This URL is allowed to
    # execute multiple commands. We double check permissions of each command
    # as it is invoked to ensure there is no privilege escalation.
    # TODO consider allowing multiple commands to regular command URLs
    # iff each command is the same.

    proto = httpv2protocolhandler(req, ui, args=command['args'])

    if reqcommand == b'multirequest':
        if not COMMANDS.commandavailable(command['command'], proto):
            # TODO proper error mechanism
            res.status = b'200 OK'
            res.headers[b'Content-Type'] = b'text/plain'
            res.setbodybytes(_('wire protocol command not available: %s') %
                             command['command'])
            return True

        # TODO don't use assert here, since it may be elided by -O.
        assert authedperm in (b'ro', b'rw')
        wirecommand = COMMANDS[command['command']]
        assert wirecommand.permission in ('push', 'pull')

        if authedperm == b'ro' and wirecommand.permission != 'pull':
            # TODO proper error mechanism
            res.status = b'403 Forbidden'
            res.headers[b'Content-Type'] = b'text/plain'
            res.setbodybytes(_('insufficient permissions to execute '
                               'command: %s') % command['command'])
            return True

        # TODO should we also call checkperm() here? Maybe not if we're going
        # to overhaul that API. The granted scope from the URL check should
        # be good enough.

    else:
        # Don't allow multiple commands outside of ``multirequest`` URL.
        if issubsequent:
            # TODO proper error mechanism
            res.status = b'200 OK'
            res.headers[b'Content-Type'] = b'text/plain'
            res.setbodybytes(_('multiple commands cannot be issued to this '
                               'URL'))
            return True

        if reqcommand != command['command']:
            # TODO define proper error mechanism
            res.status = b'200 OK'
            res.headers[b'Content-Type'] = b'text/plain'
            res.setbodybytes(_('command in frame must match command in URL'))
            return True

    res.status = b'200 OK'
    res.headers[b'Content-Type'] = FRAMINGTYPE

    try:
        objs = dispatch(repo, proto, command['command'], command['redirect'])

        action, meta = reactor.oncommandresponsereadyobjects(
            outstream, command['requestid'], objs)

    except error.WireprotoCommandError as e:
        action, meta = reactor.oncommanderror(
            outstream, command['requestid'], e.message, e.messageargs)

    except Exception as e:
        # Catch-all so an arbitrary command failure is reported to the
        # client as a server error frame instead of a dropped connection.
        action, meta = reactor.onservererror(
            outstream, command['requestid'],
            _('exception when invoking command: %s') %
            stringutil.forcebytestr(e))

    if action == 'sendframes':
        res.setbodygen(meta['framegen'])
        return True
    elif action == 'noop':
        return False
    else:
        raise error.ProgrammingError('unhandled event from reactor: %s' %
                                     action)
def getdispatchrepo(repo, proto, command):
    """Obtain the repository object used to dispatch a wire protocol command.

    Returns the ``'served'`` filtered view of ``repo``. ``proto`` and
    ``command`` are accepted for interface symmetry but are not consulted
    here.
    """
    return repo.filtered('served')
def dispatch(repo, proto, command, redirect):
    """Run a wire protocol command.

    Returns an iterable of objects that will be sent to the client.

    This is a generator: command execution is lazy. ``redirect``, when
    truthy, is a mapping holding ``b'targets'`` and ``b'hashes'`` entries
    that are forwarded to the response cacher.
    """
    repo = getdispatchrepo(repo, proto, command)

    entry = COMMANDS[command]
    func = entry.func
    spec = entry.args

    args = proto.getargs(spec)

    # There is some duplicate boilerplate code here for calling the command and
    # emitting objects. It is either that or a lot of indented code that looks
    # like a pyramid (since there are a lot of code paths that result in not
    # using the cacher).
    callcommand = lambda: func(repo, proto, **pycompat.strkwargs(args))

    # Request is not cacheable. Don't bother instantiating a cacher.
    if not entry.cachekeyfn:
        for o in callcommand():
            yield o
        return

    if redirect:
        redirecttargets = redirect[b'targets']
        redirecthashes = redirect[b'hashes']
    else:
        redirecttargets = []
        redirecthashes = []

    cacher = makeresponsecacher(repo, proto, command, args,
                                cborutil.streamencode,
                                redirecttargets=redirecttargets,
                                redirecthashes=redirecthashes)

    # But we have no cacher. Do default handling.
    if not cacher:
        for o in callcommand():
            yield o
        return

    with cacher:
        # Keyword argument names must be native strings, hence strkwargs
        # (args keys are bytes on the wire).
        cachekey = entry.cachekeyfn(repo, proto, cacher,
                                    **pycompat.strkwargs(args))

        # No cache key or the cacher doesn't like it. Do default handling.
        if cachekey is None or not cacher.setcachekey(cachekey):
            for o in callcommand():
                yield o
            return

        # Serve it from the cache, if possible.
        cached = cacher.lookup()

        if cached:
            for o in cached['objs']:
                yield o
            return

        # Else call the command and feed its output into the cacher, allowing
        # the cacher to buffer/mutate objects as it desires.
        for o in callcommand():
            for o in cacher.onobject(o):
                yield o

        for o in cacher.onfinished():
            yield o
418
@interfaceutil.implementer(wireprototypes.baseprotocolhandler)
class httpv2protocolhandler(object):
    """Protocol handler for the HTTP version 2 wire protocol transport.

    Wraps a parsed HTTP request (``req``) and ``ui``, plus (optionally) the
    already-decoded command arguments mapping (``args``) from a command
    request frame. Several methods of the base protocol handler interface
    are intentionally unimplemented for this transport.
    """
    def __init__(self, req, ui, args=None):
        self._req = req
        self._ui = ui
        # Decoded command arguments from the frame payload, or None when the
        # handler is constructed before a specific command is known.
        self._args = args

    @property
    def name(self):
        return HTTP_WIREPROTO_V2

    def getargs(self, args):
        """Validate client-supplied arguments against the ``args`` spec.

        Raises ``error.WireprotoCommandError`` for unknown or missing
        required arguments; otherwise returns a dict of values to pass to
        the command function, with defaults filled in.
        """
        # First look for args that were passed but aren't registered on this
        # command.
        extra = set(self._args) - set(args)
        if extra:
            raise error.WireprotoCommandError(
                'unsupported argument to command: %s' %
                ', '.join(sorted(extra)))

        # And look for required arguments that are missing.
        missing = {a for a in args if args[a]['required']} - set(self._args)

        if missing:
            raise error.WireprotoCommandError(
                'missing required arguments: %s' % ', '.join(sorted(missing)))

        # Now derive the arguments to pass to the command, taking into
        # account the arguments specified by the client.
        data = {}
        for k, meta in sorted(args.items()):
            # This argument wasn't passed by the client.
            if k not in self._args:
                # Defaults are callables so mutable defaults aren't shared.
                data[k] = meta['default']()
                continue

            v = self._args[k]

            # Sets may be expressed as lists. Silently normalize.
            if meta['type'] == 'set' and isinstance(v, list):
                v = set(v)

            # TODO consider more/stronger type validation.

            data[k] = v

        return data

    def getprotocaps(self):
        # Protocol capabilities are currently not implemented for HTTP V2.
        return set()

    def getpayload(self):
        raise NotImplementedError

    @contextlib.contextmanager
    def mayberedirectstdio(self):
        raise NotImplementedError

    def client(self):
        raise NotImplementedError

    def addcapabilities(self, repo, caps):
        # No transport-specific capabilities are added for HTTP V2.
        return caps

    def checkperm(self, perm):
        raise NotImplementedError
486
def httpv2apidescriptor(req, repo):
    """Return the version 2 API descriptor (capabilities) for ``repo``."""
    return _capabilitiesv2(repo, httpv2protocolhandler(req, repo.ui))
491
def _capabilitiesv2(repo, proto):
    """Obtain the set of capabilities for version 2 transports.

    These capabilities are distinct from the capabilities for version 1
    transports.

    Returns a dict (suitable for CBOR encoding) describing the commands,
    framing media types, path filter prefixes, repository requirements
    and (optionally) content redirect targets supported by this server.
    """
    caps = {
        'commands': {},
        'framingmediatypes': [FRAMINGTYPE],
        'pathfilterprefixes': set(narrowspec.VALID_PREFIXES),
    }

    # Describe every registered version 2 command: its arguments,
    # their types/defaults, and the permission needed to run it.
    for command, entry in COMMANDS.items():
        args = {}

        for arg, meta in entry.args.items():
            args[arg] = {
                # TODO should this be a normalized type using CBOR's
                # terminology?
                b'type': meta['type'],
                b'required': meta['required'],
            }

            # Only optional arguments carry an advertised default value.
            if not meta['required']:
                args[arg][b'default'] = meta['default']()

            if meta['validvalues']:
                args[arg][b'validvalues'] = meta['validvalues']

        # TODO this type of check should be defined in a per-command callback.
        # ``rawstorefiledata`` is only advertised when stream clone style
        # server generation is allowed by config.
        if (command == b'rawstorefiledata'
            and not streamclone.allowservergeneration(repo)):
            continue

        caps['commands'][command] = {
            'args': args,
            'permissions': [entry.permission],
        }

        # Commands may advertise extra, command-specific capability data.
        if entry.extracapabilitiesfn:
            extracaps = entry.extracapabilitiesfn(repo, proto)
            caps['commands'][command].update(extracaps)

    caps['rawrepoformats'] = sorted(repo.requirements &
                                    repo.supportedformats)

    # Advertise content redirect targets, if any are configured.
    targets = getadvertisedredirecttargets(repo, proto)
    if targets:
        caps[b'redirect'] = {
            b'targets': [],
            b'hashes': [b'sha256', b'sha1'],
        }

        for target in targets:
            entry = {
                b'name': target['name'],
                b'protocol': target['protocol'],
                b'uris': target['uris'],
            }

            # Optional per-target keys are copied through verbatim.
            for key in ('snirequired', 'tlsversions'):
                if key in target:
                    entry[key] = target[key]

            caps[b'redirect'][b'targets'].append(entry)

    # Give the protocol handler a chance to add transport-specific items.
    return proto.addcapabilities(repo, caps)
558
559
def getadvertisedredirecttargets(repo, proto):
    """Obtain a list of content redirect targets.

    Returns a list containing potential redirect targets that will be
    advertised in capabilities data. Each dict MUST have the following
    keys:

    name
       The name of this redirect target. This is the identifier clients use
       to refer to a target. It is transferred as part of every command
       request.

    protocol
       Network protocol used by this target. Typically this is the string
       in front of the ``://`` in a URL. e.g. ``https``.

    uris
       List of representative URIs for this target. Clients can use the
       URIs to test parsing for compatibility or for ordering preference
       for which target to use.

    The following optional keys are recognized:

    snirequired
       Bool indicating if Server Name Indication (SNI) is required to
       connect to this target.

    tlsversions
       List of bytes indicating which TLS versions are supported by this
       target.

    By default, clients reflect the target order advertised by servers
    and servers will use the first client-advertised target when picking
    a redirect target. So targets should be advertised in the order the
    server prefers they be used.
    """
    # No targets by default; presumably extensions wrap or replace this
    # function to advertise redirect targets — verify against callers.
    return []
596
597
def wireprotocommand(name, args=None, permission='push', cachekeyfn=None,
                     extracapabilitiesfn=None):
    """Decorator to declare a wire protocol command.

    ``name`` is the name of the wire protocol command being provided.

    ``args`` is a dict defining arguments accepted by the command. Keys are
    the argument name. Values are dicts with the following keys:

       ``type``
          The argument data type. Must be one of the following string
          literals: ``bytes``, ``int``, ``list``, ``dict``, ``set``,
          or ``bool``.

       ``default``
          A callable returning the default value for this argument. If not
          specified, ``None`` will be the default value.

       ``example``
          An example value for this argument.

       ``validvalues``
          Set of recognized values for this argument.

    ``permission`` defines the permission type needed to run this command.
    Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
    respectively. Default is to assume command requires ``push`` permissions
    because otherwise commands not declaring their permissions could modify
    a repository that is supposed to be read-only.

    ``cachekeyfn`` defines an optional callable that can derive the
    cache key for this request.

    ``extracapabilitiesfn`` defines an optional callable that defines extra
    command capabilities/parameters that are advertised next to the command
    in the capabilities data structure describing the server. The callable
    receives as arguments the repository and protocol objects. It returns
    a dict of extra fields to add to the command descriptor.

    Wire protocol commands are generators of objects to be serialized and
    sent to the client.

    If a command raises an uncaught exception, this will be translated into
    a command error.

    All commands can opt in to being cacheable by defining a function
    (``cachekeyfn``) that is called to derive a cache key. This function
    receives the same arguments as the command itself plus a ``cacher``
    argument containing the active cacher for the request and returns a bytes
    containing the key in a cache the response to this command may be cached
    under.
    """
    # Restrict registration to version 2 transports only.
    transports = {k for k, v in wireprototypes.TRANSPORTS.items()
                  if v['version'] == 2}

    if permission not in ('push', 'pull'):
        raise error.ProgrammingError('invalid wire protocol permission; '
                                     'got %s; expected "push" or "pull"' %
                                     permission)

    if args is None:
        args = {}

    if not isinstance(args, dict):
        raise error.ProgrammingError('arguments for version 2 commands '
                                     'must be declared as dicts')

    # Validate and normalize per-argument metadata up front so dispatch
    # code can rely on ``required``, ``default`` and ``validvalues``
    # always being present.
    for arg, meta in args.items():
        if arg == '*':
            raise error.ProgrammingError('* argument name not allowed on '
                                         'version 2 commands')

        if not isinstance(meta, dict):
            raise error.ProgrammingError('arguments for version 2 commands '
                                         'must declare metadata as a dict')

        if 'type' not in meta:
            raise error.ProgrammingError('%s argument for command %s does not '
                                         'declare type field' % (arg, name))

        if meta['type'] not in ('bytes', 'int', 'list', 'dict', 'set', 'bool'):
            raise error.ProgrammingError('%s argument for command %s has '
                                         'illegal type: %s' % (arg, name,
                                                               meta['type']))

        if 'example' not in meta:
            raise error.ProgrammingError('%s argument for command %s does not '
                                         'declare example field' % (arg, name))

        # An argument is required precisely when no default was declared.
        meta['required'] = 'default' not in meta

        meta.setdefault('default', lambda: None)
        meta.setdefault('validvalues', None)

    def register(func):
        # Duplicate registration is a programming error, not a runtime
        # condition.
        if name in COMMANDS:
            raise error.ProgrammingError('%s command already registered '
                                         'for version 2' % name)

        COMMANDS[name] = wireprototypes.commandentry(
            func, args=args, transports=transports, permission=permission,
            cachekeyfn=cachekeyfn, extracapabilitiesfn=extracapabilitiesfn)

        return func

    return register
703
704
def makecommandcachekeyfn(command, localversion=None, allargs=False):
    """Construct a cache key derivation function with common features.

    By default, the cache key is a hash of:

    * The command name.
    * A global cache version number.
    * A local cache version number (passed via ``localversion``).
    * All the arguments passed to the command.
    * The media type used.
    * Wire protocol version string.
    * The repository path.

    Returns a callable with the ``cachekeyfn`` signature expected by
    ``wireprotocommand`` (``(repo, proto, cacher, **args)``), or the
    callable returns ``None`` when the command cannot be cached.
    """
    if not allargs:
        raise error.ProgrammingError('only allargs=True is currently supported')

    if localversion is None:
        raise error.ProgrammingError('must set localversion argument value')

    def cachekeyfn(repo, proto, cacher, **args):
        spec = COMMANDS[command]

        # Commands that mutate the repo can not be cached.
        if spec.permission == 'push':
            return None

        # TODO config option to disable caching.

        # Our key derivation strategy is to construct a data structure
        # holding everything that could influence cacheability and to hash
        # the CBOR representation of that. Using CBOR seems like it might
        # be overkill. However, simpler hashing mechanisms are prone to
        # duplicate input issues. e.g. if you just concatenate two values,
        # "foo"+"bar" is identical to "fo"+"obar". Using CBOR provides
        # "padding" between values and prevents these problems.

        # Seed the hash with various data.
        state = {
            # To invalidate all cache keys.
            b'globalversion': GLOBAL_CACHE_VERSION,

            # More granular cache key invalidation.
            b'localversion': localversion,

            # Cache keys are segmented by command.
            b'command': pycompat.sysbytes(command),

            # Throw in the media type and API version strings so changes
            # to exchange semantics invalid cache.
            b'mediatype': FRAMINGTYPE,
            b'version': HTTP_WIREPROTO_V2,

            # So same requests for different repos don't share cache keys.
            b'repo': repo.root,
        }

        # The arguments passed to us will have already been normalized.
        # Default values will be set, etc. This is important because it
        # means that it doesn't matter if clients send an explicit argument
        # or rely on the default value: it will all normalize to the same
        # set of arguments on the server and therefore the same cache key.
        #
        # Arguments by their very nature must support being encoded to CBOR.
        # And the CBOR encoder is deterministic. So we hash the arguments
        # by feeding the CBOR of their representation into the hasher.
        if allargs:
            state[b'args'] = pycompat.byteskwargs(args)

        # Let the active cacher mix in request-specific state (e.g. the
        # negotiated redirect targets).
        cacher.adjustcachekeystate(state)

        hasher = hashlib.sha1()
        for chunk in cborutil.streamencode(state):
            hasher.update(chunk)

        return pycompat.sysbytes(hasher.hexdigest())

    return cachekeyfn
777
778
def makeresponsecacher(repo, proto, command, args, objencoderfn,
                       redirecttargets, redirecthashes):
    """Construct a cacher for a cacheable command.

    Returns an ``iwireprotocolcommandcacher`` instance.

    Extensions can monkeypatch this function to provide custom caching
    backends.
    """
    # No caching backend in core: returning None disables response caching
    # unless an extension replaces this function.
    return None
788
789
def resolvenodes(repo, revisions):
    """Resolve nodes from a revisions specifier data structure.

    ``revisions`` is a list of dicts, each with a ``type`` key selecting
    one of three specifier forms: ``changesetexplicit``,
    ``changesetexplicitdepth`` or ``changesetdagrange``.

    Returns a de-duplicated list of changelog nodes, preserving first-seen
    order across all specifiers. Raises ``error.WireprotoCommandError``
    for malformed specifiers.
    """
    cl = repo.changelog
    clhasnode = cl.hasnode

    # ``seen`` guards against emitting the same node twice while ``nodes``
    # preserves insertion order.
    seen = set()
    nodes = []

    if not isinstance(revisions, list):
        raise error.WireprotoCommandError('revisions must be defined as an '
                                          'array')

    for spec in revisions:
        if b'type' not in spec:
            raise error.WireprotoCommandError(
                'type key not present in revision specifier')

        typ = spec[b'type']

        if typ == b'changesetexplicit':
            # A flat list of explicit changeset nodes.
            if b'nodes' not in spec:
                raise error.WireprotoCommandError(
                    'nodes key not present in changesetexplicit revision '
                    'specifier')

            for node in spec[b'nodes']:
                if node not in seen:
                    nodes.append(node)
                    seen.add(node)

        elif typ == b'changesetexplicitdepth':
            # Explicit nodes plus ``depth`` generations of ancestors.
            for key in (b'nodes', b'depth'):
                if key not in spec:
                    raise error.WireprotoCommandError(
                        '%s key not present in changesetexplicitdepth revision '
                        'specifier', (key,))

            # depth - 1 because ancestors() counts the starting revision
            # itself as the first generation.
            for rev in repo.revs(b'ancestors(%ln, %s)', spec[b'nodes'],
                                 spec[b'depth'] - 1):
                node = cl.node(rev)

                if node not in seen:
                    nodes.append(node)
                    seen.add(node)

        elif typ == b'changesetdagrange':
            # All changesets reachable from ``heads`` but not from
            # ``roots`` (exclusive DAG range).
            for key in (b'roots', b'heads'):
                if key not in spec:
                    raise error.WireprotoCommandError(
                        '%s key not present in changesetdagrange revision '
                        'specifier', (key,))

            if not spec[b'heads']:
                raise error.WireprotoCommandError(
                    'heads key in changesetdagrange cannot be empty')

            if spec[b'roots']:
                # Silently ignore root nodes unknown to this repository.
                common = [n for n in spec[b'roots'] if clhasnode(n)]
            else:
                common = [nullid]

            for n in discovery.outgoing(repo, common, spec[b'heads']).missing:
                if n not in seen:
                    nodes.append(n)
                    seen.add(n)

        else:
            raise error.WireprotoCommandError(
                'unknown revision specifier type: %s', (typ,))

    return nodes
860
861
@wireprotocommand('branchmap', permission='pull')
def branchmapv2(repo, proto):
    """Emit a map of branch name to branch heads."""
    branches = {}
    for name, heads in repo.branchmap().iteritems():
        branches[encoding.fromlocal(name)] = heads
    yield branches
865
866
@wireprotocommand('capabilities', permission='pull')
def capabilitiesv2(repo, proto):
    # Thin wire protocol command wrapper around _capabilitiesv2().
    yield _capabilitiesv2(repo, proto)
869
870
@wireprotocommand(
    'changesetdata',
    args={
        'revisions': {
            'type': 'list',
            'example': [{
                b'type': b'changesetexplicit',
                b'nodes': [b'abcdef...'],
            }],
        },
        'fields': {
            'type': 'set',
            'default': set,
            'example': {b'parents', b'revision'},
            'validvalues': {b'bookmarks', b'parents', b'phase', b'revision'},
        },
    },
    permission='pull')
def changesetdata(repo, proto, revisions, fields):
    """Emit data describing requested changesets.

    First yields a map with ``totalitems``, then one map per resolved
    changeset (with the requested ``fields``), each optionally followed
    by raw revision data announced via ``fieldsfollowing``.
    """
    # TODO look for unknown fields and abort when they can't be serviced.
    # This could probably be validated by dispatcher using validvalues.

    cl = repo.changelog
    outgoing = resolvenodes(repo, revisions)
    publishing = repo.publishing()

    if outgoing:
        repo.hook('preoutgoing', throw=True, source='serve')

    yield {
        b'totalitems': len(outgoing),
    }

    # The phases of nodes already transferred to the client may have changed
    # since the client last requested data. We send phase-only records
    # for these revisions, if requested.
    # TODO actually do this. We'll probably want to emit phase heads
    # in the ancestry set of the outgoing revisions. This will ensure
    # that phase updates within that set are seen.
    if b'phase' in fields:
        pass

    # Invert the bookmark store into a node -> {bookmark names} mapping.
    nodebookmarks = {}
    for mark, node in repo._bookmarks.items():
        nodebookmarks.setdefault(node, set()).add(mark)

    # It is already topologically sorted by revision number.
    for node in outgoing:
        d = {
            b'node': node,
        }

        if b'parents' in fields:
            d[b'parents'] = cl.parents(node)

        if b'phase' in fields:
            if publishing:
                # Publishing repos expose everything as public; no need to
                # resolve a changectx per node.
                d[b'phase'] = b'public'
            else:
                ctx = repo[node]
                d[b'phase'] = ctx.phasestr()

        if b'bookmarks' in fields and node in nodebookmarks:
            d[b'bookmarks'] = sorted(nodebookmarks[node])
            # Entries are consumed here so only leftovers are emitted in
            # the bookmark-only pass below.
            del nodebookmarks[node]

        followingmeta = []
        followingdata = []

        if b'revision' in fields:
            revisiondata = cl.revision(node, raw=True)
            followingmeta.append((b'revision', len(revisiondata)))
            followingdata.append(revisiondata)

        # TODO make it possible for extensions to wrap a function or register
        # a handler to service custom fields.

        if followingmeta:
            d[b'fieldsfollowing'] = followingmeta

        yield d

        for extra in followingdata:
            yield extra

    # If requested, send bookmarks from nodes that didn't have revision
    # data sent so receiver is aware of any bookmark updates.
    if b'bookmarks' in fields:
        for node, marks in sorted(nodebookmarks.iteritems()):
            yield {
                b'node': node,
                b'bookmarks': sorted(marks),
            }
963
964
class FileAccessError(Exception):
    """Represents an error accessing a specific file."""

    def __init__(self, path, msg, args):
        # path: the file path that could not be accessed.
        # msg: printf-style message template.
        # args: tuple of values interpolated into ``msg``.
        # NOTE(review): assigning ``self.args`` shadows the standard
        # ``BaseException.args`` tuple; callers appear to rely on these
        # attributes for formatting — confirm before changing.
        self.path = path
        self.msg = msg
        self.args = args
971
972
def getfilestore(repo, proto, path):
    """Obtain a file storage object for use with wire protocol.

    Exists as a standalone function so extensions can monkeypatch to add
    access control.
    """
    # repo.file() succeeds even for unknown paths, so a zero-length
    # filelog is how we detect a missing file.
    store = repo.file(path)

    if len(store) == 0:
        raise FileAccessError(path, 'unknown file: %s', (path,))

    return store
986
987
def emitfilerevisions(repo, path, revisions, linknodes, fields):
    """Emit wire-encodable maps (plus raw data chunks) for file revisions.

    For each revision, yields a map describing the requested ``fields``;
    when revision or delta data is present, the raw chunks announced in
    ``fieldsfollowing`` are yielded immediately after the map.
    """
    for rev in revisions:
        item = {b'node': rev.node}

        if b'parents' in fields:
            item[b'parents'] = [rev.p1node, rev.p2node]

        if b'linknode' in fields:
            item[b'linknode'] = linknodes[rev.node]

        trailingmeta = []
        trailingdata = []

        if b'revision' in fields:
            if rev.revision is not None:
                # Full revision text is available.
                trailingmeta.append((b'revision', len(rev.revision)))
                trailingdata.append(rev.revision)
            else:
                # Only a delta is available; advertise its base node.
                item[b'deltabasenode'] = rev.basenode
                trailingmeta.append((b'delta', len(rev.delta)))
                trailingdata.append(rev.delta)

        if trailingmeta:
            item[b'fieldsfollowing'] = trailingmeta

        yield item

        for chunk in trailingdata:
            yield chunk
1018
1019
def makefilematcher(repo, pathfilter):
    """Construct a matcher from a path filter dict."""

    if pathfilter:
        # Only `path:` and `rootfilesin:` patterns may come over the wire;
        # reject anything else before building a matcher from it.
        for key in (b'include', b'exclude'):
            for pattern in pathfilter.get(key, []):
                if pattern.startswith((b'path:', b'rootfilesin:')):
                    continue
                raise error.WireprotoCommandError(
                    '%s pattern must begin with `path:` or `rootfilesin:`; '
                    'got %s', (key, pattern))

        matcher = matchmod.match(repo.root, b'',
                                 include=pathfilter.get(b'include', []),
                                 exclude=pathfilter.get(b'exclude', []))
    else:
        matcher = matchmod.match(repo.root, b'')

    # Requested patterns could include files not in the local store. So
    # filter those out.
    return repo.narrowmatch(matcher)
@wireprotocommand(
    'filedata',
    args={
        'haveparents': {
            'type': 'bool',
            'default': lambda: False,
            'example': True,
        },
        'nodes': {
            'type': 'list',
            'example': [b'0123456...'],
        },
        'fields': {
            'type': 'set',
            'default': set,
            'example': {b'parents', b'revision'},
            'validvalues': {b'parents', b'revision', b'linknode'},
        },
        'path': {
            'type': 'bytes',
            'example': b'foo.txt',
        }
    },
    permission='pull',
    # TODO censoring a file revision won't invalidate the cache.
    # Figure out a way to take censoring into account when deriving
    # the cache key.
    cachekeyfn=makecommandcachekeyfn('filedata', 1, allargs=True))
def filedata(repo, proto, haveparents, nodes, fields, path):
    """Emit revision data for the requested nodes of a single file.

    Yields a ``totalitems`` header dict, then per-revision dicts and raw
    payloads as produced by ``emitfilerevisions()``.
    """
    # TODO this API allows access to file revisions that are attached to
    # secret changesets. filesdata does not have this problem. Maybe this
    # API should be deleted?

    try:
        # Extensions may wish to access the protocol handler.
        store = getfilestore(repo, proto, path)
    except FileAccessError as e:
        raise error.WireprotoCommandError(e.msg, e.args)

    clnode = repo.changelog.node
    # file node -> changelog node, consumed by emitfilerevisions().
    linknodes = {}

    # Validate requested nodes.
    for node in nodes:
        try:
            store.rev(node)
        except error.LookupError:
            raise error.WireprotoCommandError('unknown file node: %s',
                                              (hex(node),))

        # TODO by creating the filectx against a specific file revision
        # instead of changeset, linkrev() is always used. This is wrong for
        # cases where linkrev() may refer to a hidden changeset. But since this
        # API doesn't know anything about changesets, we're not sure how to
        # disambiguate the linknode. Perhaps we should delete this API?
        fctx = repo.filectx(path, fileid=node)
        linknodes[node] = clnode(fctx.introrev())

    revisions = store.emitrevisions(nodes,
                                    revisiondata=b'revision' in fields,
                                    assumehaveparentrevisions=haveparents)

    yield {
        b'totalitems': len(nodes),
    }

    for o in emitfilerevisions(repo, path, revisions, linknodes, fields):
        yield o
def filesdatacapabilities(repo, proto):
    """Extra capabilities advertised for the ``filesdata`` command."""
    size = repo.ui.configint(
        b'experimental', b'server.filesdata.recommended-batch-size')
    return {b'recommendedbatchsize': size}
@wireprotocommand(
    'filesdata',
    args={
        'haveparents': {
            'type': 'bool',
            'default': lambda: False,
            'example': True,
        },
        'fields': {
            'type': 'set',
            'default': set,
            'example': {b'parents', b'revision'},
            'validvalues': {b'firstchangeset', b'linknode', b'parents',
                            b'revision'},
        },
        'pathfilter': {
            'type': 'dict',
            'default': lambda: None,
            'example': {b'include': [b'path:tests']},
        },
        'revisions': {
            'type': 'list',
            'example': [{
                b'type': b'changesetexplicit',
                b'nodes': [b'abcdef...'],
            }],
        },
    },
    permission='pull',
    # TODO censoring a file revision won't invalidate the cache.
    # Figure out a way to take censoring into account when deriving
    # the cache key.
    cachekeyfn=makecommandcachekeyfn('filesdata', 1, allargs=True),
    extracapabilitiesfn=filesdatacapabilities)
def filesdata(repo, proto, haveparents, fields, pathfilter, revisions):
    """Emit file revision data for all files touched by a set of changesets.

    Resolves ``revisions`` to changeset nodes, collects the relevant file
    nodes (optionally restricted by ``pathfilter``), then streams per-path
    headers and revision data via ``emitfilerevisions()``.
    """
    # TODO This should operate on a repo that exposes obsolete changesets. There
    # is a race between a client making a push that obsoletes a changeset and
    # another client fetching files data for that changeset. If a client has a
    # changeset, it should probably be allowed to access files data for that
    # changeset.

    outgoing = resolvenodes(repo, revisions)
    filematcher = makefilematcher(repo, pathfilter)

    # path -> {fnode: linknode}
    fnodes = collections.defaultdict(dict)

    # We collect the set of relevant file revisions by iterating the changeset
    # revisions and either walking the set of files recorded in the changeset
    # or by walking the manifest at that revision. There is probably room for a
    # storage-level API to request this data, as it can be expensive to compute
    # and would benefit from caching or alternate storage from what revlogs
    # provide.
    for node in outgoing:
        ctx = repo[node]
        mctx = ctx.manifestctx()
        md = mctx.read()

        # With haveparents, only files changed by this changeset are new
        # to the client; otherwise every file in the manifest may be.
        if haveparents:
            checkpaths = ctx.files()
        else:
            checkpaths = md.keys()

        for path in checkpaths:
            fnode = md[path]

            if path in fnodes and fnode in fnodes[path]:
                continue

            if not filematcher(path):
                continue

            fnodes[path].setdefault(fnode, node)

    yield {
        b'totalpaths': len(fnodes),
        b'totalitems': sum(len(v) for v in fnodes.values())
    }

    for path, filenodes in sorted(fnodes.items()):
        try:
            store = getfilestore(repo, proto, path)
        except FileAccessError as e:
            raise error.WireprotoCommandError(e.msg, e.args)

        yield {
            b'path': path,
            b'totalitems': len(filenodes),
        }

        revisions = store.emitrevisions(filenodes.keys(),
                                        revisiondata=b'revision' in fields,
                                        assumehaveparentrevisions=haveparents)

        for o in emitfilerevisions(repo, path, revisions, filenodes, fields):
            yield o
@wireprotocommand(
    'heads',
    args={
        'publiconly': {
            'type': 'bool',
            'default': lambda: False,
            'example': False,
        },
    },
    permission='pull')
def headsv2(repo, proto, publiconly):
    """Emit the list of repository head nodes."""
    # When only public heads are wanted, view the repo through the
    # 'immutable' filter so non-public changesets disappear.
    view = repo.filtered('immutable') if publiconly else repo
    yield view.heads()
@wireprotocommand(
    'known',
    args={
        'nodes': {
            'type': 'list',
            'default': list,
            'example': [b'deadbeef'],
        },
    },
    permission='pull')
def knownv2(repo, proto, nodes):
    """Emit a bytestring of b'1'/b'0' flags, one per queried node."""
    flags = []
    for present in repo.known(nodes):
        flags.append(b'1' if present else b'0')
    yield b''.join(flags)
@wireprotocommand(
    'listkeys',
    args={
        'namespace': {
            'type': 'bytes',
            'example': b'ns',
        },
    },
    permission='pull')
def listkeysv2(repo, proto, namespace):
    """Emit the pushkey listing for a namespace.

    Keys and values are converted from the local encoding before going
    over the wire.
    """
    localkeys = repo.listkeys(encoding.tolocal(namespace))
    result = {}
    for k, v in localkeys.iteritems():
        result[encoding.fromlocal(k)] = encoding.fromlocal(v)

    yield result
@wireprotocommand(
    'lookup',
    args={
        'key': {
            'type': 'bytes',
            'example': b'foo',
        },
    },
    permission='pull')
def lookupv2(repo, proto, key):
    """Resolve a symbolic name to a node."""
    # TODO handle exception.
    yield repo.lookup(encoding.tolocal(key))
def manifestdatacapabilities(repo, proto):
    """Extra capabilities advertised for the ``manifestdata`` command."""
    recommended = repo.ui.configint(
        b'experimental', b'server.manifestdata.recommended-batch-size')
    return {b'recommendedbatchsize': recommended}
@wireprotocommand(
    'manifestdata',
    args={
        'nodes': {
            'type': 'list',
            'example': [b'0123456...'],
        },
        'haveparents': {
            'type': 'bool',
            'default': lambda: False,
            'example': True,
        },
        'fields': {
            'type': 'set',
            'default': set,
            'example': {b'parents', b'revision'},
            'validvalues': {b'parents', b'revision'},
        },
        'tree': {
            'type': 'bytes',
            'example': b'',
        },
    },
    permission='pull',
    cachekeyfn=makecommandcachekeyfn('manifestdata', 1, allargs=True),
    extracapabilitiesfn=manifestdatacapabilities)
def manifestdata(repo, proto, haveparents, nodes, fields, tree):
    """Emit revision data for the requested manifest nodes.

    ``tree`` selects the manifest storage (b'' is the root manifest).
    Yields a ``totalitems`` header, then per-revision metadata dicts and
    raw payloads (fulltext or delta).
    """
    store = repo.manifestlog.getstorage(tree)

    # Validate the node is known and abort on unknown revisions.
    for node in nodes:
        try:
            store.rev(node)
        except error.LookupError:
            raise error.WireprotoCommandError(
                'unknown node: %s', (node,))

    revisions = store.emitrevisions(nodes,
                                    revisiondata=b'revision' in fields,
                                    assumehaveparentrevisions=haveparents)

    yield {
        b'totalitems': len(nodes),
    }

    # NOTE: this loop mirrors emitfilerevisions() minus linknode handling.
    for revision in revisions:
        d = {
            b'node': revision.node,
        }

        if b'parents' in fields:
            d[b'parents'] = [revision.p1node, revision.p2node]

        followingmeta = []
        followingdata = []

        if b'revision' in fields:
            if revision.revision is not None:
                followingmeta.append((b'revision', len(revision.revision)))
                followingdata.append(revision.revision)
            else:
                # Delta form: advertise the base the delta applies to.
                d[b'deltabasenode'] = revision.basenode
                followingmeta.append((b'delta', len(revision.delta)))
                followingdata.append(revision.delta)

        if followingmeta:
            d[b'fieldsfollowing'] = followingmeta

        yield d

        for extra in followingdata:
            yield extra
@wireprotocommand(
    'pushkey',
    args={
        'namespace': {
            'type': 'bytes',
            'example': b'ns',
        },
        'key': {
            'type': 'bytes',
            'example': b'key',
        },
        'old': {
            'type': 'bytes',
            'example': b'old',
        },
        'new': {
            'type': 'bytes',
            # Explicit bytes literal for consistency with the other
            # argument examples (was a bare str literal).
            'example': b'new',
        },
    },
    permission='push')
def pushkeyv2(repo, proto, namespace, key, old, new):
    """Set a pushkey (bookmark, phase, ...) on the repository.

    All arguments arrive UTF-8 encoded from the wire and are converted
    to the local encoding before being handed to the repo. Yields the
    boolean result of the pushkey operation.
    """
    # TODO handle ui output redirection
    yield repo.pushkey(encoding.tolocal(namespace),
                       encoding.tolocal(key),
                       encoding.tolocal(old),
                       encoding.tolocal(new))
1387
1388
@wireprotocommand(
    'rawstorefiledata',
    args={
        'files': {
            'type': 'list',
            'example': [b'changelog', b'manifestlog'],
        },
        'pathfilter': {
            'type': 'list',
            'default': lambda: None,
            'example': {b'include': [b'path:tests']},
        },
    },
    permission='pull')
def rawstorefiledata(repo, proto, files, pathfilter):
    """Stream raw store files for stream-clone style retrieval.

    Yields a header dict with ``filecount``/``totalsize``, then for each
    selected store file a descriptor dict followed by the raw file
    contents as an indefinite bytestring response.
    """
    if not streamclone.allowservergeneration(repo):
        raise error.WireprotoCommandError(b'stream clone is disabled')

    # TODO support dynamically advertising what store files "sets" are
    # available. For now, we support changelog, manifestlog, and files.
    files = set(files)
    allowedfiles = {b'changelog', b'manifestlog'}

    unsupported = files - allowedfiles
    if unsupported:
        raise error.WireprotoCommandError(b'unknown file type: %s',
                                          (b', '.join(sorted(unsupported)),))

    # Snapshot the store file list under the repo lock so sizes are
    # consistent; actual file data is read after the lock is released.
    with repo.lock():
        topfiles = list(repo.store.topfiles())

    sendfiles = []
    totalsize = 0

    # TODO this is a bunch of storage layer interface abstractions because
    # it assumes revlogs.
    for name, encodedname, size in topfiles:
        if b'changelog' in files and name.startswith(b'00changelog'):
            pass
        elif b'manifestlog' in files and name.startswith(b'00manifest'):
            pass
        else:
            continue

        sendfiles.append((b'store', name, size))
        totalsize += size

    yield {
        b'filecount': len(sendfiles),
        b'totalsize': totalsize,
    }

    for location, name, size in sendfiles:
        yield {
            b'location': location,
            b'path': name,
            b'size': size,
        }

        # We have to use a closure for this to ensure the context manager is
        # closed only after sending the final chunk.
        # NOTE: getfiledata closes over the loop's ``name``/``size``; this is
        # safe because this outer generator is suspended at the yield below
        # while the consumer drains each file before advancing the loop.
        def getfiledata():
            with repo.svfs(name, 'rb', auditpath=False) as fh:
                for chunk in util.filechunkiter(fh, limit=size):
                    yield chunk

        yield wireprototypes.indefinitebytestringresponse(
            getfiledata())
General Comments 0
You need to be logged in to leave comments. Login now