peer: introduce a limitedarguments attributes...
marmoute -
r42334:69921d02 default
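This changeset adds a limitedarguments attribute to both HTTP peer classes: httppeer computes it from the server's httppostargs capability (new line 385), while httpv2peer hard-codes limitedarguments = False at class level (new lines 754-756). Below is a minimal, hypothetical caller-side sketch of how such a flag could be consulted; the choose_batch_size helper and its thresholds are illustrative assumptions, not part of this commit.

# Hypothetical sketch (not part of this changeset): a caller deciding how
# many arguments to send per wire-protocol request based on the new flag.
def choose_batch_size(peer, generous=10000, conservative=100):
    # httppeer sets limitedarguments to True when the server lacks the
    # 'httppostargs' capability, i.e. arguments must fit in headers/URL.
    if getattr(peer, 'limitedarguments', True):
        return conservative
    return generous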
@@ -1,1006 +1,1010 @@
1 # httppeer.py - HTTP repository proxy classes for mercurial
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
8
9 from __future__ import absolute_import
10
11 import errno
12 import io
13 import os
14 import socket
15 import struct
16 import weakref
17
18 from .i18n import _
19 from . import (
20 bundle2,
21 error,
22 httpconnection,
23 pycompat,
24 repository,
25 statichttprepo,
26 url as urlmod,
27 util,
28 wireprotoframing,
29 wireprototypes,
30 wireprotov1peer,
31 wireprotov2peer,
32 wireprotov2server,
33 )
34 from .utils import (
35 cborutil,
36 interfaceutil,
37 stringutil,
38 )
39
40 httplib = util.httplib
41 urlerr = util.urlerr
42 urlreq = util.urlreq
43
44 def encodevalueinheaders(value, header, limit):
45 """Encode a string value into multiple HTTP headers.
46
47 ``value`` will be encoded into 1 or more HTTP headers with the names
48 ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
49 name + value will be at most ``limit`` bytes long.
50
51 Returns an iterable of 2-tuples consisting of header names and
52 values as native strings.
53 """
54 # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
55 # not bytes. This function always takes bytes in as arguments.
56 fmt = pycompat.strurl(header) + r'-%s'
57 # Note: it is *NOT* a bug that the last bit here is a bytestring
58 # and not a unicode: we're just getting the encoded length anyway,
59 # and using an r-string to make it portable between Python 2 and 3
60 # doesn't work because then the \r is a literal backslash-r
61 # instead of a carriage return.
62 valuelen = limit - len(fmt % r'000') - len(': \r\n')
63 result = []
64
65 n = 0
66 for i in pycompat.xrange(0, len(value), valuelen):
67 n += 1
68 result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
69
70 return result
71
72 class _multifile(object):
73 def __init__(self, *fileobjs):
74 for f in fileobjs:
75 if not util.safehasattr(f, 'length'):
76 raise ValueError(
77 '_multifile only supports file objects that '
78 'have a length but this one does not:', type(f), f)
79 self._fileobjs = fileobjs
80 self._index = 0
81
82 @property
83 def length(self):
84 return sum(f.length for f in self._fileobjs)
85
86 def read(self, amt=None):
87 if amt <= 0:
88 return ''.join(f.read() for f in self._fileobjs)
89 parts = []
90 while amt and self._index < len(self._fileobjs):
91 parts.append(self._fileobjs[self._index].read(amt))
92 got = len(parts[-1])
93 if got < amt:
94 self._index += 1
95 amt -= got
96 return ''.join(parts)
97
98 def seek(self, offset, whence=os.SEEK_SET):
99 if whence != os.SEEK_SET:
100 raise NotImplementedError(
101 '_multifile does not support anything other'
102 ' than os.SEEK_SET for whence on seek()')
103 if offset != 0:
104 raise NotImplementedError(
105 '_multifile only supports seeking to start, but that '
106 'could be fixed if you need it')
107 for f in self._fileobjs:
108 f.seek(0)
109 self._index = 0
110
111 def makev1commandrequest(ui, requestbuilder, caps, capablefn,
112 repobaseurl, cmd, args):
113 """Make an HTTP request to run a command for a version 1 client.
114
115 ``caps`` is a set of known server capabilities. The value may be
116 None if capabilities are not yet known.
117
118 ``capablefn`` is a function to evaluate a capability.
119
120 ``cmd``, ``args``, and ``data`` define the command, its arguments, and
121 raw data to pass to it.
122 """
123 if cmd == 'pushkey':
124 args['data'] = ''
125 data = args.pop('data', None)
126 headers = args.pop('headers', {})
127
128 ui.debug("sending %s command\n" % cmd)
129 q = [('cmd', cmd)]
130 headersize = 0
131 # Important: don't use self.capable() here or else you end up
132 # with infinite recursion when trying to look up capabilities
133 # for the first time.
134 postargsok = caps is not None and 'httppostargs' in caps
135
136 # Send arguments via POST.
137 if postargsok and args:
138 strargs = urlreq.urlencode(sorted(args.items()))
139 if not data:
140 data = strargs
141 else:
142 if isinstance(data, bytes):
143 i = io.BytesIO(data)
144 i.length = len(data)
145 data = i
146 argsio = io.BytesIO(strargs)
147 argsio.length = len(strargs)
148 data = _multifile(argsio, data)
149 headers[r'X-HgArgs-Post'] = len(strargs)
150 elif args:
151 # Calling self.capable() can infinite loop if we are calling
152 # "capabilities". But that command should never accept wire
153 # protocol arguments. So this should never happen.
154 assert cmd != 'capabilities'
155 httpheader = capablefn('httpheader')
156 if httpheader:
157 headersize = int(httpheader.split(',', 1)[0])
158
159 # Send arguments via HTTP headers.
160 if headersize > 0:
161 # The headers can typically carry more data than the URL.
162 encargs = urlreq.urlencode(sorted(args.items()))
163 for header, value in encodevalueinheaders(encargs, 'X-HgArg',
164 headersize):
165 headers[header] = value
166 # Send arguments via query string (Mercurial <1.9).
167 else:
168 q += sorted(args.items())
169
170 qs = '?%s' % urlreq.urlencode(q)
171 cu = "%s%s" % (repobaseurl, qs)
172 size = 0
173 if util.safehasattr(data, 'length'):
174 size = data.length
175 elif data is not None:
176 size = len(data)
177 if data is not None and r'Content-Type' not in headers:
178 headers[r'Content-Type'] = r'application/mercurial-0.1'
179
180 # Tell the server we accept application/mercurial-0.2 and multiple
181 # compression formats if the server is capable of emitting those
182 # payloads.
183 # Note: Keep this set empty by default, as client advertisement of
184 # protocol parameters should only occur after the handshake.
185 protoparams = set()
186
187 mediatypes = set()
188 if caps is not None:
189 mt = capablefn('httpmediatype')
190 if mt:
191 protoparams.add('0.1')
192 mediatypes = set(mt.split(','))
193
194 protoparams.add('partial-pull')
195
196 if '0.2tx' in mediatypes:
197 protoparams.add('0.2')
198
199 if '0.2tx' in mediatypes and capablefn('compression'):
200 # We /could/ compare supported compression formats and prune
201 # non-mutually supported or error if nothing is mutually supported.
202 # For now, send the full list to the server and have it error.
203 comps = [e.wireprotosupport().name for e in
204 util.compengines.supportedwireengines(util.CLIENTROLE)]
205 protoparams.add('comp=%s' % ','.join(comps))
206
207 if protoparams:
208 protoheaders = encodevalueinheaders(' '.join(sorted(protoparams)),
209 'X-HgProto',
210 headersize or 1024)
211 for header, value in protoheaders:
212 headers[header] = value
213
214 varyheaders = []
215 for header in headers:
216 if header.lower().startswith(r'x-hg'):
217 varyheaders.append(header)
218
219 if varyheaders:
220 headers[r'Vary'] = r','.join(sorted(varyheaders))
221
222 req = requestbuilder(pycompat.strurl(cu), data, headers)
223
224 if data is not None:
225 ui.debug("sending %d bytes\n" % size)
226 req.add_unredirected_header(r'Content-Length', r'%d' % size)
227
228 return req, cu, qs
229
230 def _reqdata(req):
231 """Get request data, if any. If no data, returns None."""
232 if pycompat.ispy3:
233 return req.data
234 if not req.has_data():
235 return None
236 return req.get_data()
237
238 def sendrequest(ui, opener, req):
239 """Send a prepared HTTP request.
240
241 Returns the response object.
242 """
243 dbg = ui.debug
244 if (ui.debugflag
245 and ui.configbool('devel', 'debug.peer-request')):
246 line = 'devel-peer-request: %s\n'
247 dbg(line % '%s %s' % (pycompat.bytesurl(req.get_method()),
248 pycompat.bytesurl(req.get_full_url())))
249 hgargssize = None
250
251 for header, value in sorted(req.header_items()):
252 header = pycompat.bytesurl(header)
253 value = pycompat.bytesurl(value)
254 if header.startswith('X-hgarg-'):
255 if hgargssize is None:
256 hgargssize = 0
257 hgargssize += len(value)
258 else:
259 dbg(line % ' %s %s' % (header, value))
260
261 if hgargssize is not None:
262 dbg(line % ' %d bytes of commands arguments in headers'
263 % hgargssize)
264 data = _reqdata(req)
265 if data is not None:
266 length = getattr(data, 'length', None)
267 if length is None:
268 length = len(data)
269 dbg(line % ' %d bytes of data' % length)
270
271 start = util.timer()
272
273 res = None
274 try:
275 res = opener.open(req)
276 except urlerr.httperror as inst:
277 if inst.code == 401:
278 raise error.Abort(_('authorization failed'))
279 raise
280 except httplib.HTTPException as inst:
281 ui.debug('http error requesting %s\n' %
282 util.hidepassword(req.get_full_url()))
283 ui.traceback()
284 raise IOError(None, inst)
285 finally:
286 if ui.debugflag and ui.configbool('devel', 'debug.peer-request'):
287 code = res.code if res else -1
288 dbg(line % ' finished in %.4f seconds (%d)'
289 % (util.timer() - start, code))
290
291 # Insert error handlers for common I/O failures.
292 urlmod.wrapresponse(res)
293
294 return res
295
296 class RedirectedRepoError(error.RepoError):
297 def __init__(self, msg, respurl):
298 super(RedirectedRepoError, self).__init__(msg)
299 self.respurl = respurl
300
301 def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible,
302 allowcbor=False):
303 # record the url we got redirected to
304 redirected = False
305 respurl = pycompat.bytesurl(resp.geturl())
306 if respurl.endswith(qs):
307 respurl = respurl[:-len(qs)]
308 qsdropped = False
309 else:
310 qsdropped = True
311
312 if baseurl.rstrip('/') != respurl.rstrip('/'):
313 redirected = True
314 if not ui.quiet:
315 ui.warn(_('real URL is %s\n') % respurl)
316
317 try:
318 proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))
319 except AttributeError:
320 proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))
321
322 safeurl = util.hidepassword(baseurl)
323 if proto.startswith('application/hg-error'):
324 raise error.OutOfBandError(resp.read())
325
326 # Pre 1.0 versions of Mercurial used text/plain and
327 # application/hg-changegroup. We don't support such old servers.
328 if not proto.startswith('application/mercurial-'):
329 ui.debug("requested URL: '%s'\n" % util.hidepassword(requrl))
330 msg = _("'%s' does not appear to be an hg repository:\n"
331 "---%%<--- (%s)\n%s\n---%%<---\n") % (
332 safeurl, proto or 'no content-type', resp.read(1024))
333
334 # Some servers may strip the query string from the redirect. We
335 # raise a special error type so callers can react to this specially.
336 if redirected and qsdropped:
337 raise RedirectedRepoError(msg, respurl)
338 else:
339 raise error.RepoError(msg)
340
341 try:
342 subtype = proto.split('-', 1)[1]
343
344 # Unless we end up supporting CBOR in the legacy wire protocol,
345 # this should ONLY be encountered for the initial capabilities
346 # request during handshake.
347 if subtype == 'cbor':
348 if allowcbor:
349 return respurl, proto, resp
350 else:
351 raise error.RepoError(_('unexpected CBOR response from '
352 'server'))
353
354 version_info = tuple([int(n) for n in subtype.split('.')])
355 except ValueError:
356 raise error.RepoError(_("'%s' sent a broken Content-Type "
357 "header (%s)") % (safeurl, proto))
358
359 # TODO consider switching to a decompression reader that uses
360 # generators.
361 if version_info == (0, 1):
362 if compressible:
363 resp = util.compengines['zlib'].decompressorreader(resp)
364
365 elif version_info == (0, 2):
366 # application/mercurial-0.2 always identifies the compression
367 # engine in the payload header.
368 elen = struct.unpack('B', util.readexactly(resp, 1))[0]
369 ename = util.readexactly(resp, elen)
370 engine = util.compengines.forwiretype(ename)
371
372 resp = engine.decompressorreader(resp)
373 else:
374 raise error.RepoError(_("'%s' uses newer protocol %s") %
375 (safeurl, subtype))
376
377 return respurl, proto, resp
378
379 class httppeer(wireprotov1peer.wirepeer):
380 def __init__(self, ui, path, url, opener, requestbuilder, caps):
381 self.ui = ui
382 self._path = path
383 self._url = url
384 self._caps = caps
385 self.limitedarguments = caps is not None and 'httppostargs' not in caps
386 self._urlopener = opener
387 self._requestbuilder = requestbuilder
388
389 def __del__(self):
390 for h in self._urlopener.handlers:
391 h.close()
392 getattr(h, "close_all", lambda: None)()
393
394 # Begin of ipeerconnection interface.
395
396 def url(self):
397 return self._path
398
399 def local(self):
400 return None
401
402 def peer(self):
403 return self
404
405 def canpush(self):
406 return True
407
408 def close(self):
409 try:
410 reqs, sent, recv = (self._urlopener.requestscount,
411 self._urlopener.sentbytescount,
412 self._urlopener.receivedbytescount)
413 except AttributeError:
414 return
415 self.ui.note(_('(sent %d HTTP requests and %d bytes; '
416 'received %d bytes in responses)\n') %
417 (reqs, sent, recv))
418
419 # End of ipeerconnection interface.
420
421 # Begin of ipeercommands interface.
422
423 def capabilities(self):
424 return self._caps
425
426 # End of ipeercommands interface.
427
428 def _callstream(self, cmd, _compressible=False, **args):
429 args = pycompat.byteskwargs(args)
430
431 req, cu, qs = makev1commandrequest(self.ui, self._requestbuilder,
432 self._caps, self.capable,
433 self._url, cmd, args)
434
435 resp = sendrequest(self.ui, self._urlopener, req)
436
437 self._url, ct, resp = parsev1commandresponse(self.ui, self._url, cu, qs,
438 resp, _compressible)
439
440 return resp
441
442 def _call(self, cmd, **args):
443 fp = self._callstream(cmd, **args)
444 try:
445 return fp.read()
446 finally:
447 # if using keepalive, allow connection to be reused
448 fp.close()
449
450 def _callpush(self, cmd, cg, **args):
451 # have to stream bundle to a temp file because we do not have
452 # http 1.1 chunked transfer.
453
454 types = self.capable('unbundle')
455 try:
456 types = types.split(',')
457 except AttributeError:
458 # servers older than d1b16a746db6 will send 'unbundle' as a
459 # boolean capability. They only support headerless/uncompressed
460 # bundles.
461 types = [""]
462 for x in types:
463 if x in bundle2.bundletypes:
464 type = x
465 break
466
467 tempname = bundle2.writebundle(self.ui, cg, None, type)
468 fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
469 headers = {r'Content-Type': r'application/mercurial-0.1'}
470
471 try:
472 r = self._call(cmd, data=fp, headers=headers, **args)
473 vals = r.split('\n', 1)
474 if len(vals) < 2:
475 raise error.ResponseError(_("unexpected response:"), r)
476 return vals
477 except urlerr.httperror:
478 # Catch and re-raise these so we don't try and treat them
479 # like generic socket errors. They lack any values in
480 # .args on Python 3 which breaks our socket.error block.
481 raise
482 except socket.error as err:
483 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
484 raise error.Abort(_('push failed: %s') % err.args[1])
485 raise error.Abort(err.args[1])
486 finally:
487 fp.close()
488 os.unlink(tempname)
489
490 def _calltwowaystream(self, cmd, fp, **args):
491 fh = None
492 fp_ = None
493 filename = None
494 try:
495 # dump bundle to disk
496 fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
497 fh = os.fdopen(fd, r"wb")
498 d = fp.read(4096)
499 while d:
500 fh.write(d)
501 d = fp.read(4096)
502 fh.close()
503 # start http push
504 fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
505 headers = {r'Content-Type': r'application/mercurial-0.1'}
506 return self._callstream(cmd, data=fp_, headers=headers, **args)
507 finally:
508 if fp_ is not None:
509 fp_.close()
510 if fh is not None:
511 fh.close()
512 os.unlink(filename)
513
514 def _callcompressable(self, cmd, **args):
515 return self._callstream(cmd, _compressible=True, **args)
516
517 def _abort(self, exception):
518 raise exception
519
520 def sendv2request(ui, opener, requestbuilder, apiurl, permission, requests,
521 redirect):
522 wireprotoframing.populatestreamencoders()
523
524 uiencoders = ui.configlist(b'experimental', b'httppeer.v2-encoder-order')
525
526 if uiencoders:
527 encoders = []
528
529 for encoder in uiencoders:
530 if encoder not in wireprotoframing.STREAM_ENCODERS:
531 ui.warn(_(b'wire protocol version 2 encoder referenced in '
532 b'config (%s) is not known; ignoring\n') % encoder)
533 else:
534 encoders.append(encoder)
535
536 else:
537 encoders = wireprotoframing.STREAM_ENCODERS_ORDER
538
539 reactor = wireprotoframing.clientreactor(ui,
540 hasmultiplesend=False,
541 buffersends=True,
542 clientcontentencoders=encoders)
543
544 handler = wireprotov2peer.clienthandler(ui, reactor,
545 opener=opener,
546 requestbuilder=requestbuilder)
547
548 url = '%s/%s' % (apiurl, permission)
549
550 if len(requests) > 1:
551 url += '/multirequest'
552 else:
553 url += '/%s' % requests[0][0]
554
555 ui.debug('sending %d commands\n' % len(requests))
556 for command, args, f in requests:
557 ui.debug('sending command %s: %s\n' % (
558 command, stringutil.pprint(args, indent=2)))
559 assert not list(handler.callcommand(command, args, f,
560 redirect=redirect))
561
562 # TODO stream this.
563 body = b''.join(map(bytes, handler.flushcommands()))
564
565 # TODO modify user-agent to reflect v2
566 headers = {
567 r'Accept': wireprotov2server.FRAMINGTYPE,
568 r'Content-Type': wireprotov2server.FRAMINGTYPE,
569 }
570
571 req = requestbuilder(pycompat.strurl(url), body, headers)
572 req.add_unredirected_header(r'Content-Length', r'%d' % len(body))
573
574 try:
575 res = opener.open(req)
576 except urlerr.httperror as e:
577 if e.code == 401:
578 raise error.Abort(_('authorization failed'))
579
580 raise
581 except httplib.HTTPException as e:
582 ui.traceback()
583 raise IOError(None, e)
584
585 return handler, res
586
587 class queuedcommandfuture(pycompat.futures.Future):
588 """Wraps result() on command futures to trigger submission on call."""
589
590 def result(self, timeout=None):
591 if self.done():
592 return pycompat.futures.Future.result(self, timeout)
593
594 self._peerexecutor.sendcommands()
595
596 # sendcommands() will restore the original __class__ and self.result
597 # will resolve to Future.result.
598 return self.result(timeout)
599
600 @interfaceutil.implementer(repository.ipeercommandexecutor)
601 class httpv2executor(object):
602 def __init__(self, ui, opener, requestbuilder, apiurl, descriptor,
603 redirect):
604 self._ui = ui
605 self._opener = opener
606 self._requestbuilder = requestbuilder
607 self._apiurl = apiurl
608 self._descriptor = descriptor
609 self._redirect = redirect
610 self._sent = False
611 self._closed = False
612 self._neededpermissions = set()
613 self._calls = []
614 self._futures = weakref.WeakSet()
615 self._responseexecutor = None
616 self._responsef = None
617
618 def __enter__(self):
619 return self
620
621 def __exit__(self, exctype, excvalue, exctb):
622 self.close()
623
624 def callcommand(self, command, args):
625 if self._sent:
626 raise error.ProgrammingError('callcommand() cannot be used after '
627 'commands are sent')
628
629 if self._closed:
630 raise error.ProgrammingError('callcommand() cannot be used after '
631 'close()')
632
633 # The service advertises which commands are available. So if we attempt
634 # to call an unknown command or pass an unknown argument, we can screen
635 # for this.
636 if command not in self._descriptor['commands']:
637 raise error.ProgrammingError(
638 'wire protocol command %s is not available' % command)
639
640 cmdinfo = self._descriptor['commands'][command]
641 unknownargs = set(args.keys()) - set(cmdinfo.get('args', {}))
642
643 if unknownargs:
644 raise error.ProgrammingError(
645 'wire protocol command %s does not accept argument: %s' % (
646 command, ', '.join(sorted(unknownargs))))
647
648 self._neededpermissions |= set(cmdinfo['permissions'])
649
650 # TODO we /could/ also validate types here, since the API descriptor
651 # includes types...
652
653 f = pycompat.futures.Future()
654
655 # Monkeypatch it so result() triggers sendcommands(), otherwise result()
656 # could deadlock.
657 f.__class__ = queuedcommandfuture
658 f._peerexecutor = self
659
660 self._futures.add(f)
661 self._calls.append((command, args, f))
662
663 return f
664
665 def sendcommands(self):
666 if self._sent:
667 return
668
669 if not self._calls:
670 return
671
672 self._sent = True
673
674 # Unhack any future types so caller sees a clean type and so we
675 # break reference cycle.
676 for f in self._futures:
677 if isinstance(f, queuedcommandfuture):
678 f.__class__ = pycompat.futures.Future
679 f._peerexecutor = None
680
681 # Mark the future as running and filter out cancelled futures.
682 calls = [(command, args, f)
683 for command, args, f in self._calls
684 if f.set_running_or_notify_cancel()]
685
686 # Clear out references, prevent improper object usage.
687 self._calls = None
688
689 if not calls:
690 return
691
692 permissions = set(self._neededpermissions)
693
694 if 'push' in permissions and 'pull' in permissions:
695 permissions.remove('pull')
696
697 if len(permissions) > 1:
698 raise error.RepoError(_('cannot make request requiring multiple '
699 'permissions: %s') %
700 _(', ').join(sorted(permissions)))
701
702 permission = {
703 'push': 'rw',
704 'pull': 'ro',
705 }[permissions.pop()]
706
707 handler, resp = sendv2request(
708 self._ui, self._opener, self._requestbuilder, self._apiurl,
709 permission, calls, self._redirect)
710
711 # TODO we probably want to validate the HTTP code, media type, etc.
712
713 self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
714 self._responsef = self._responseexecutor.submit(self._handleresponse,
715 handler, resp)
716
717 def close(self):
718 if self._closed:
719 return
720
721 self.sendcommands()
722
723 self._closed = True
724
725 if not self._responsef:
726 return
727
728 # TODO ^C here may not result in immediate program termination.
729
730 try:
731 self._responsef.result()
732 finally:
733 self._responseexecutor.shutdown(wait=True)
734 self._responsef = None
735 self._responseexecutor = None
736
737 # If any of our futures are still in progress, mark them as
738 # errored, otherwise a result() could wait indefinitely.
739 for f in self._futures:
740 if not f.done():
741 f.set_exception(error.ResponseError(
742 _('unfulfilled command response')))
743
744 self._futures = None
745
746 def _handleresponse(self, handler, resp):
747 # Called in a thread to read the response.
748
749 while handler.readdata(resp):
750 pass
751
752 @interfaceutil.implementer(repository.ipeerv2)
753 class httpv2peer(object):
754
755 limitedarguments = False
756
757 def __init__(self, ui, repourl, apipath, opener, requestbuilder,
758 apidescriptor):
759 self.ui = ui
760 self.apidescriptor = apidescriptor
761
762 if repourl.endswith('/'):
763 repourl = repourl[:-1]
764
765 self._url = repourl
766 self._apipath = apipath
767 self._apiurl = '%s/%s' % (repourl, apipath)
768 self._opener = opener
769 self._requestbuilder = requestbuilder
770
771 self._redirect = wireprotov2peer.supportedredirects(ui, apidescriptor)
772
773 # Start of ipeerconnection.
774
775 def url(self):
776 return self._url
777
778 def local(self):
779 return None
780
781 def peer(self):
782 return self
783
784 def canpush(self):
785 # TODO change once implemented.
786 return False
787
788 def close(self):
789 self.ui.note(_('(sent %d HTTP requests and %d bytes; '
790 'received %d bytes in responses)\n') %
791 (self._opener.requestscount,
792 self._opener.sentbytescount,
793 self._opener.receivedbytescount))
794
795 # End of ipeerconnection.
796
797 # Start of ipeercapabilities.
798
799 def capable(self, name):
800 # The capabilities used internally historically map to capabilities
801 # advertised from the "capabilities" wire protocol command. However,
802 # version 2 of that command works differently.
803
804 # Maps to commands that are available.
805 if name in ('branchmap', 'getbundle', 'known', 'lookup', 'pushkey'):
806 return True
807
808 # Other concepts.
809 if name in ('bundle2'):
810 return True
811
812 # Alias command-* to presence of command of that name.
813 if name.startswith('command-'):
814 return name[len('command-'):] in self.apidescriptor['commands']
815
816 return False
817
818 def requirecap(self, name, purpose):
819 if self.capable(name):
820 return
821
822 raise error.CapabilityError(
823 _('cannot %s; client or remote repository does not support the '
824 '\'%s\' capability') % (purpose, name))
825
826 # End of ipeercapabilities.
827
828 def _call(self, name, **args):
829 with self.commandexecutor() as e:
830 return e.callcommand(name, args).result()
831
832 def commandexecutor(self):
833 return httpv2executor(self.ui, self._opener, self._requestbuilder,
834 self._apiurl, self.apidescriptor, self._redirect)
835
836 # Registry of API service names to metadata about peers that handle it.
837 #
838 # The following keys are meaningful:
839 #
840 # init
841 # Callable receiving (ui, repourl, servicepath, opener, requestbuilder,
842 # apidescriptor) to create a peer.
843 #
844 # priority
845 # Integer priority for the service. If we could choose from multiple
846 # services, we choose the one with the highest priority.
847 API_PEERS = {
848 wireprototypes.HTTP_WIREPROTO_V2: {
849 'init': httpv2peer,
850 'priority': 50,
851 },
852 }
849
853
850 def performhandshake(ui, url, opener, requestbuilder):
854 def performhandshake(ui, url, opener, requestbuilder):
851 # The handshake is a request to the capabilities command.
855 # The handshake is a request to the capabilities command.
852
856
853 caps = None
857 caps = None
854 def capable(x):
858 def capable(x):
855 raise error.ProgrammingError('should not be called')
859 raise error.ProgrammingError('should not be called')
856
860
857 args = {}
861 args = {}
858
862
859 # The client advertises support for newer protocols by adding an
863 # The client advertises support for newer protocols by adding an
860 # X-HgUpgrade-* header with a list of supported APIs and an
864 # X-HgUpgrade-* header with a list of supported APIs and an
861 # X-HgProto-* header advertising which serializing formats it supports.
865 # X-HgProto-* header advertising which serializing formats it supports.
862 # We only support the HTTP version 2 transport and CBOR responses for
866 # We only support the HTTP version 2 transport and CBOR responses for
863 # now.
867 # now.
864 advertisev2 = ui.configbool('experimental', 'httppeer.advertise-v2')
868 advertisev2 = ui.configbool('experimental', 'httppeer.advertise-v2')
865
869
866 if advertisev2:
870 if advertisev2:
867 args['headers'] = {
871 args['headers'] = {
868 r'X-HgProto-1': r'cbor',
872 r'X-HgProto-1': r'cbor',
869 }
873 }
870
874
871 args['headers'].update(
875 args['headers'].update(
872 encodevalueinheaders(' '.join(sorted(API_PEERS)),
876 encodevalueinheaders(' '.join(sorted(API_PEERS)),
873 'X-HgUpgrade',
877 'X-HgUpgrade',
874 # We don't know the header limit this early.
878 # We don't know the header limit this early.
875 # So make it small.
879 # So make it small.
876 1024))
880 1024))
877
881
878 req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
882 req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
879 capable, url, 'capabilities',
883 capable, url, 'capabilities',
880 args)
884 args)
881 resp = sendrequest(ui, opener, req)
885 resp = sendrequest(ui, opener, req)
882
886
883 # The server may redirect us to the repo root, stripping the
887 # The server may redirect us to the repo root, stripping the
884 # ?cmd=capabilities query string from the URL. The server would likely
888 # ?cmd=capabilities query string from the URL. The server would likely
885 # return HTML in this case and ``parsev1commandresponse()`` would raise.
889 # return HTML in this case and ``parsev1commandresponse()`` would raise.
886 # We catch this special case and re-issue the capabilities request against
890 # We catch this special case and re-issue the capabilities request against
887 # the new URL.
891 # the new URL.
888 #
892 #
889 # We should ideally not do this, as a redirect that drops the query
893 # We should ideally not do this, as a redirect that drops the query
890 # string from the URL is arguably a server bug. (Garbage in, garbage out).
894 # string from the URL is arguably a server bug. (Garbage in, garbage out).
891 # However, Mercurial clients for several years appeared to handle this
895 # However, Mercurial clients for several years appeared to handle this
892 # issue without behavior degradation. And according to issue 5860, it may
896 # issue without behavior degradation. And according to issue 5860, it may
893 # be a longstanding bug in some server implementations. So we allow a
897 # be a longstanding bug in some server implementations. So we allow a
894 # redirect that drops the query string to "just work."
898 # redirect that drops the query string to "just work."
895 try:
899 try:
896 respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
900 respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
897 compressible=False,
901 compressible=False,
898 allowcbor=advertisev2)
902 allowcbor=advertisev2)
899 except RedirectedRepoError as e:
903 except RedirectedRepoError as e:
900 req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
904 req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
901 capable, e.respurl,
905 capable, e.respurl,
902 'capabilities', args)
906 'capabilities', args)
903 resp = sendrequest(ui, opener, req)
907 resp = sendrequest(ui, opener, req)
904 respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
908 respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
905 compressible=False,
909 compressible=False,
906 allowcbor=advertisev2)
910 allowcbor=advertisev2)
907
911
908 try:
912 try:
909 rawdata = resp.read()
913 rawdata = resp.read()
910 finally:
914 finally:
911 resp.close()
915 resp.close()
912
916
913 if not ct.startswith('application/mercurial-'):
917 if not ct.startswith('application/mercurial-'):
914 raise error.ProgrammingError('unexpected content-type: %s' % ct)
918 raise error.ProgrammingError('unexpected content-type: %s' % ct)
915
919
916 if advertisev2:
920 if advertisev2:
917 if ct == 'application/mercurial-cbor':
921 if ct == 'application/mercurial-cbor':
918 try:
922 try:
919 info = cborutil.decodeall(rawdata)[0]
923 info = cborutil.decodeall(rawdata)[0]
920 except cborutil.CBORDecodeError:
924 except cborutil.CBORDecodeError:
921 raise error.Abort(_('error decoding CBOR from remote server'),
925 raise error.Abort(_('error decoding CBOR from remote server'),
922 hint=_('try again and consider contacting '
926 hint=_('try again and consider contacting '
923 'the server operator'))
927 'the server operator'))
924
928
925 # We got a legacy response. That's fine.
929 # We got a legacy response. That's fine.
926 elif ct in ('application/mercurial-0.1', 'application/mercurial-0.2'):
930 elif ct in ('application/mercurial-0.1', 'application/mercurial-0.2'):
927 info = {
931 info = {
928 'v1capabilities': set(rawdata.split())
932 'v1capabilities': set(rawdata.split())
929 }
933 }
930
934
931 else:
935 else:
932 raise error.RepoError(
936 raise error.RepoError(
933 _('unexpected response type from server: %s') % ct)
937 _('unexpected response type from server: %s') % ct)
934 else:
938 else:
935 info = {
939 info = {
936 'v1capabilities': set(rawdata.split())
940 'v1capabilities': set(rawdata.split())
937 }
941 }
938
942
939 return respurl, info
943 return respurl, info
940
944
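For illustration, the ``info`` mapping returned above can take two shapes, depending on whether the server answered with a legacy capabilities string or a CBOR API descriptor. A minimal sketch (the capability strings, API name, and ``apibase`` value are placeholders, not output of any particular server):

    # Sketch only -- plausible handshake results, not real server output.
    info_legacy = {
        'v1capabilities': {b'lookup', b'branchmap', b'getbundle'},
    }
    info_cbor = {
        'v1capabilities': {b'lookup', b'branchmap', b'getbundle'},
        'apibase': b'api/',
        'apis': {
            b'exp-http-v2-0003': {},  # per-service descriptor elided
        },
    }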
941 def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
945 def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
942 """Construct an appropriate HTTP peer instance.
946 """Construct an appropriate HTTP peer instance.
943
947
944 ``opener`` is an ``url.opener`` that should be used to establish
948 ``opener`` is an ``url.opener`` that should be used to establish
945 connections and perform HTTP requests.
949 connections and perform HTTP requests.
946
950
947 ``requestbuilder`` is the type used for constructing HTTP requests.
951 ``requestbuilder`` is the type used for constructing HTTP requests.
948 It exists as an argument so extensions can override the default.
952 It exists as an argument so extensions can override the default.
949 """
953 """
950 u = util.url(path)
954 u = util.url(path)
951 if u.query or u.fragment:
955 if u.query or u.fragment:
952 raise error.Abort(_('unsupported URL component: "%s"') %
956 raise error.Abort(_('unsupported URL component: "%s"') %
953 (u.query or u.fragment))
957 (u.query or u.fragment))
954
958
955 # urllib cannot handle URLs with embedded user or passwd.
959 # urllib cannot handle URLs with embedded user or passwd.
956 url, authinfo = u.authinfo()
960 url, authinfo = u.authinfo()
957 ui.debug('using %s\n' % url)
961 ui.debug('using %s\n' % url)
958
962
959 opener = opener or urlmod.opener(ui, authinfo)
963 opener = opener or urlmod.opener(ui, authinfo)
960
964
961 respurl, info = performhandshake(ui, url, opener, requestbuilder)
965 respurl, info = performhandshake(ui, url, opener, requestbuilder)
962
966
963 # Given the intersection of APIs that both we and the server support,
967 # Given the intersection of APIs that both we and the server support,
964 # sort by their advertised priority and pick the first one.
968 # sort by their advertised priority and pick the first one.
965 #
969 #
966 # TODO consider making this request-based and interface driven. For
970 # TODO consider making this request-based and interface driven. For
967 # example, the caller could say "I want a peer that does X." It's quite
971 # example, the caller could say "I want a peer that does X." It's quite
968 # possible that not all peers would do that. Since we know the service
972 # possible that not all peers would do that. Since we know the service
969 # capabilities, we could filter out services not meeting the
973 # capabilities, we could filter out services not meeting the
970 # requirements. Possibly by consulting the interfaces defined by the
974 # requirements. Possibly by consulting the interfaces defined by the
971 # peer type.
975 # peer type.
972 apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys())
976 apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys())
973
977
974 preferredchoices = sorted(apipeerchoices,
978 preferredchoices = sorted(apipeerchoices,
975 key=lambda x: API_PEERS[x]['priority'],
979 key=lambda x: API_PEERS[x]['priority'],
976 reverse=True)
980 reverse=True)
977
981
978 for service in preferredchoices:
982 for service in preferredchoices:
979 apipath = '%s/%s' % (info['apibase'].rstrip('/'), service)
983 apipath = '%s/%s' % (info['apibase'].rstrip('/'), service)
980
984
981 return API_PEERS[service]['init'](ui, respurl, apipath, opener,
985 return API_PEERS[service]['init'](ui, respurl, apipath, opener,
982 requestbuilder,
986 requestbuilder,
983 info['apis'][service])
987 info['apis'][service])
984
988
985 # Failed to construct an API peer. Fall back to legacy.
989 # Failed to construct an API peer. Fall back to legacy.
986 return httppeer(ui, path, respurl, opener, requestbuilder,
990 return httppeer(ui, path, respurl, opener, requestbuilder,
987 info['v1capabilities'])
991 info['v1capabilities'])
988
992
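The selection loop above relies only on each ``API_PEERS`` entry exposing a ``priority`` used for sorting and an ``init`` callable used to construct the peer. A sketch of that shape (the service name, priority, and constructor below are illustrative, not the module's actual registry):

    # Illustrative registry entry shape, as consumed by makepeer() above.
    def _initexamplepeer(ui, respurl, apipath, opener, requestbuilder, apidesc):
        return None  # a real entry would build and return a peer instance here

    API_PEERS_EXAMPLE = {
        b'exp-http-v2-0003': {
            'init': _initexamplepeer,
            'priority': 50,  # higher values are preferred by makepeer()
        },
    }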
989 def instance(ui, path, create, intents=None, createopts=None):
993 def instance(ui, path, create, intents=None, createopts=None):
990 if create:
994 if create:
991 raise error.Abort(_('cannot create new http repository'))
995 raise error.Abort(_('cannot create new http repository'))
992 try:
996 try:
993 if path.startswith('https:') and not urlmod.has_https:
997 if path.startswith('https:') and not urlmod.has_https:
994 raise error.Abort(_('Python support for SSL and HTTPS '
998 raise error.Abort(_('Python support for SSL and HTTPS '
995 'is not installed'))
999 'is not installed'))
996
1000
997 inst = makepeer(ui, path)
1001 inst = makepeer(ui, path)
998
1002
999 return inst
1003 return inst
1000 except error.RepoError as httpexception:
1004 except error.RepoError as httpexception:
1001 try:
1005 try:
1002 r = statichttprepo.instance(ui, "static-" + path, create)
1006 r = statichttprepo.instance(ui, "static-" + path, create)
1003 ui.note(_('(falling back to static-http)\n'))
1007 ui.note(_('(falling back to static-http)\n'))
1004 return r
1008 return r
1005 except error.RepoError:
1009 except error.RepoError:
1006 raise httpexception # use the original http RepoError instead
1010 raise httpexception # use the original http RepoError instead
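A hedged usage sketch of the entry point above, as an extension or script might call it (the URL and capability name are placeholders):

    # Sketch: obtain an HTTP peer and query one capability.
    def examplepeerusage(ui):
        peer = instance(ui, b'https://hg.example.com/repo', create=False)
        try:
            return peer.capable(b'getbundle')
        finally:
            peer.close()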
@@ -1,1864 +1,1870
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 error,
12 error,
13 )
13 )
14 from .utils import (
14 from .utils import (
15 interfaceutil,
15 interfaceutil,
16 )
16 )
17
17
18 # When narrowing is finalized and no longer subject to format changes,
18 # When narrowing is finalized and no longer subject to format changes,
19 # we should move this to just "narrow" or similar.
19 # we should move this to just "narrow" or similar.
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21
21
22 # Local repository feature string.
22 # Local repository feature string.
23
23
24 # Revlogs are being used for file storage.
24 # Revlogs are being used for file storage.
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
26 # The storage part of the repository is shared from an external source.
26 # The storage part of the repository is shared from an external source.
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
28 # LFS supported for backing file storage.
28 # LFS supported for backing file storage.
29 REPO_FEATURE_LFS = b'lfs'
29 REPO_FEATURE_LFS = b'lfs'
30 # Repository supports being stream cloned.
30 # Repository supports being stream cloned.
31 REPO_FEATURE_STREAM_CLONE = b'streamclone'
31 REPO_FEATURE_STREAM_CLONE = b'streamclone'
32 # Files storage may lack data for all ancestors.
32 # Files storage may lack data for all ancestors.
33 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
33 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
34
34
35 REVISION_FLAG_CENSORED = 1 << 15
35 REVISION_FLAG_CENSORED = 1 << 15
36 REVISION_FLAG_ELLIPSIS = 1 << 14
36 REVISION_FLAG_ELLIPSIS = 1 << 14
37 REVISION_FLAG_EXTSTORED = 1 << 13
37 REVISION_FLAG_EXTSTORED = 1 << 13
38
38
39 REVISION_FLAGS_KNOWN = (
39 REVISION_FLAGS_KNOWN = (
40 REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
40 REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
41
41
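As a small illustration of how these flag constants compose bitwise, a consumer might reject revisions that carry flags it does not know how to handle (a sketch, not code from this module):

    # Sketch: reject unknown storage flags, then test a specific one.
    def checkflags(flags):
        unknown = flags & ~REVISION_FLAGS_KNOWN
        if unknown:
            raise ValueError('unsupported revision flags: %#x' % unknown)
        return bool(flags & REVISION_FLAG_CENSORED)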
42 CG_DELTAMODE_STD = b'default'
42 CG_DELTAMODE_STD = b'default'
43 CG_DELTAMODE_PREV = b'previous'
43 CG_DELTAMODE_PREV = b'previous'
44 CG_DELTAMODE_FULL = b'fulltext'
44 CG_DELTAMODE_FULL = b'fulltext'
45 CG_DELTAMODE_P1 = b'p1'
45 CG_DELTAMODE_P1 = b'p1'
46
46
47 class ipeerconnection(interfaceutil.Interface):
47 class ipeerconnection(interfaceutil.Interface):
48 """Represents a "connection" to a repository.
48 """Represents a "connection" to a repository.
49
49
50 This is the base interface for representing a connection to a repository.
50 This is the base interface for representing a connection to a repository.
51 It holds basic properties and methods applicable to all peer types.
51 It holds basic properties and methods applicable to all peer types.
52
52
53 This is not a complete interface definition and should not be used
53 This is not a complete interface definition and should not be used
54 outside of this module.
54 outside of this module.
55 """
55 """
56 ui = interfaceutil.Attribute("""ui.ui instance""")
56 ui = interfaceutil.Attribute("""ui.ui instance""")
57
57
58 def url():
58 def url():
59 """Returns a URL string representing this peer.
59 """Returns a URL string representing this peer.
60
60
61 Currently, implementations expose the raw URL used to construct the
61 Currently, implementations expose the raw URL used to construct the
62 instance. It may contain credentials as part of the URL. The
62 instance. It may contain credentials as part of the URL. The
63 expectations of the value aren't well-defined and this could lead to
63 expectations of the value aren't well-defined and this could lead to
64 data leakage.
64 data leakage.
65
65
66 TODO audit/clean consumers and more clearly define the contents of this
66 TODO audit/clean consumers and more clearly define the contents of this
67 value.
67 value.
68 """
68 """
69
69
70 def local():
70 def local():
71 """Returns a local repository instance.
71 """Returns a local repository instance.
72
72
73 If the peer represents a local repository, returns an object that
73 If the peer represents a local repository, returns an object that
74 can be used to interface with it. Otherwise returns ``None``.
74 can be used to interface with it. Otherwise returns ``None``.
75 """
75 """
76
76
77 def peer():
77 def peer():
78 """Returns an object conforming to this interface.
78 """Returns an object conforming to this interface.
79
79
80 Most implementations will ``return self``.
80 Most implementations will ``return self``.
81 """
81 """
82
82
83 def canpush():
83 def canpush():
84 """Returns a boolean indicating if this peer can be pushed to."""
84 """Returns a boolean indicating if this peer can be pushed to."""
85
85
86 def close():
86 def close():
87 """Close the connection to this peer.
87 """Close the connection to this peer.
88
88
89 This is called when the peer will no longer be used. Resources
89 This is called when the peer will no longer be used. Resources
90 associated with the peer should be cleaned up.
90 associated with the peer should be cleaned up.
91 """
91 """
92
92
93 class ipeercapabilities(interfaceutil.Interface):
93 class ipeercapabilities(interfaceutil.Interface):
94 """Peer sub-interface related to capabilities."""
94 """Peer sub-interface related to capabilities."""
95
95
96 def capable(name):
96 def capable(name):
97 """Determine support for a named capability.
97 """Determine support for a named capability.
98
98
99 Returns ``False`` if capability not supported.
99 Returns ``False`` if capability not supported.
100
100
101 Returns ``True`` if boolean capability is supported. Returns a string
101 Returns ``True`` if boolean capability is supported. Returns a string
102 if capability support is non-boolean.
102 if capability support is non-boolean.
103
103
104 Capability strings may or may not map to wire protocol capabilities.
104 Capability strings may or may not map to wire protocol capabilities.
105 """
105 """
106
106
107 def requirecap(name, purpose):
107 def requirecap(name, purpose):
108 """Require a capability to be present.
108 """Require a capability to be present.
109
109
110 Raises a ``CapabilityError`` if the capability isn't present.
110 Raises a ``CapabilityError`` if the capability isn't present.
111 """
111 """
112
112
113 class ipeercommands(interfaceutil.Interface):
113 class ipeercommands(interfaceutil.Interface):
114 """Client-side interface for communicating over the wire protocol.
114 """Client-side interface for communicating over the wire protocol.
115
115
116 This interface is used as a gateway to the Mercurial wire protocol.
116 This interface is used as a gateway to the Mercurial wire protocol.
117 methods commonly call wire protocol commands of the same name.
117 methods commonly call wire protocol commands of the same name.
118 """
118 """
119
119
120 def branchmap():
120 def branchmap():
121 """Obtain heads in named branches.
121 """Obtain heads in named branches.
122
122
123 Returns a dict mapping branch name to an iterable of nodes that are
123 Returns a dict mapping branch name to an iterable of nodes that are
124 heads on that branch.
124 heads on that branch.
125 """
125 """
126
126
127 def capabilities():
127 def capabilities():
128 """Obtain capabilities of the peer.
128 """Obtain capabilities of the peer.
129
129
130 Returns a set of string capabilities.
130 Returns a set of string capabilities.
131 """
131 """
132
132
133 def clonebundles():
133 def clonebundles():
134 """Obtains the clone bundles manifest for the repo.
134 """Obtains the clone bundles manifest for the repo.
135
135
136 Returns the manifest as unparsed bytes.
136 Returns the manifest as unparsed bytes.
137 """
137 """
138
138
139 def debugwireargs(one, two, three=None, four=None, five=None):
139 def debugwireargs(one, two, three=None, four=None, five=None):
140 """Used to facilitate debugging of arguments passed over the wire."""
140 """Used to facilitate debugging of arguments passed over the wire."""
141
141
142 def getbundle(source, **kwargs):
142 def getbundle(source, **kwargs):
143 """Obtain remote repository data as a bundle.
143 """Obtain remote repository data as a bundle.
144
144
145 This command is how the bulk of repository data is transferred from
145 This command is how the bulk of repository data is transferred from
146 the peer to the local repository.
146 the peer to the local repository.
147
147
148 Returns a generator of bundle data.
148 Returns a generator of bundle data.
149 """
149 """
150
150
151 def heads():
151 def heads():
152 """Determine all known head revisions in the peer.
152 """Determine all known head revisions in the peer.
153
153
154 Returns an iterable of binary nodes.
154 Returns an iterable of binary nodes.
155 """
155 """
156
156
157 def known(nodes):
157 def known(nodes):
158 """Determine whether multiple nodes are known.
158 """Determine whether multiple nodes are known.
159
159
160 Accepts an iterable of nodes whose presence to check for.
160 Accepts an iterable of nodes whose presence to check for.
161
161
162 Returns an iterable of booleans indicating whether the corresponding node
162 Returns an iterable of booleans indicating whether the corresponding node
163 at that index is known to the peer.
163 at that index is known to the peer.
164 """
164 """
165
165
166 def listkeys(namespace):
166 def listkeys(namespace):
167 """Obtain all keys in a pushkey namespace.
167 """Obtain all keys in a pushkey namespace.
168
168
169 Returns an iterable of key names.
169 Returns an iterable of key names.
170 """
170 """
171
171
172 def lookup(key):
172 def lookup(key):
173 """Resolve a value to a known revision.
173 """Resolve a value to a known revision.
174
174
175 Returns a binary node of the resolved revision on success.
175 Returns a binary node of the resolved revision on success.
176 """
176 """
177
177
178 def pushkey(namespace, key, old, new):
178 def pushkey(namespace, key, old, new):
179 """Set a value using the ``pushkey`` protocol.
179 """Set a value using the ``pushkey`` protocol.
180
180
181 Arguments correspond to the pushkey namespace and key to operate on and
181 Arguments correspond to the pushkey namespace and key to operate on and
182 the old and new values for that key.
182 the old and new values for that key.
183
183
184 Returns a string with the peer result. The value inside varies by the
184 Returns a string with the peer result. The value inside varies by the
185 namespace.
185 namespace.
186 """
186 """
187
187
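As a brief illustration of the two pushkey-related calls above, a sketch of moving a bookmark over the wire (the bookmark name and node values are placeholders; error handling is omitted):

    # Sketch: read the bookmarks pushkey namespace, then update one entry.
    def movebookmark(peer, oldhexnode, newhexnode):
        keys = peer.listkeys(b'bookmarks')
        if b'mybook' not in keys:
            raise LookupError('bookmark not found on remote')
        return peer.pushkey(b'bookmarks', b'mybook', oldhexnode, newhexnode)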
188 def stream_out():
188 def stream_out():
189 """Obtain streaming clone data.
189 """Obtain streaming clone data.
190
190
191 Successful result should be a generator of data chunks.
191 Successful result should be a generator of data chunks.
192 """
192 """
193
193
194 def unbundle(bundle, heads, url):
194 def unbundle(bundle, heads, url):
195 """Transfer repository data to the peer.
195 """Transfer repository data to the peer.
196
196
197 This is how the bulk of data during a push is transferred.
197 This is how the bulk of data during a push is transferred.
198
198
199 Returns the integer number of heads added to the peer.
199 Returns the integer number of heads added to the peer.
200 """
200 """
201
201
202 class ipeerlegacycommands(interfaceutil.Interface):
202 class ipeerlegacycommands(interfaceutil.Interface):
203 """Interface for implementing support for legacy wire protocol commands.
203 """Interface for implementing support for legacy wire protocol commands.
204
204
205 Wire protocol commands transition to legacy status when they are no longer
205 Wire protocol commands transition to legacy status when they are no longer
206 used by modern clients. To facilitate identifying which commands are
206 used by modern clients. To facilitate identifying which commands are
207 legacy, the interfaces are split.
207 legacy, the interfaces are split.
208 """
208 """
209
209
210 def between(pairs):
210 def between(pairs):
211 """Obtain nodes between pairs of nodes.
211 """Obtain nodes between pairs of nodes.
212
212
213 ``pairs`` is an iterable of node pairs.
213 ``pairs`` is an iterable of node pairs.
214
214
215 Returns an iterable of iterables of nodes corresponding to each
215 Returns an iterable of iterables of nodes corresponding to each
216 requested pair.
216 requested pair.
217 """
217 """
218
218
219 def branches(nodes):
219 def branches(nodes):
220 """Obtain ancestor changesets of specific nodes back to a branch point.
220 """Obtain ancestor changesets of specific nodes back to a branch point.
221
221
222 For each requested node, the peer finds the first ancestor node that is
222 For each requested node, the peer finds the first ancestor node that is
223 a DAG root or is a merge.
223 a DAG root or is a merge.
224
224
225 Returns an iterable of iterables with the resolved values for each node.
225 Returns an iterable of iterables with the resolved values for each node.
226 """
226 """
227
227
228 def changegroup(nodes, source):
228 def changegroup(nodes, source):
229 """Obtain a changegroup with data for descendants of specified nodes."""
229 """Obtain a changegroup with data for descendants of specified nodes."""
230
230
231 def changegroupsubset(bases, heads, source):
231 def changegroupsubset(bases, heads, source):
232 """Obtain a changegroup for descendants of bases that are ancestors of heads."""
232 """Obtain a changegroup for descendants of bases that are ancestors of heads."""
233
233
234 class ipeercommandexecutor(interfaceutil.Interface):
234 class ipeercommandexecutor(interfaceutil.Interface):
235 """Represents a mechanism to execute remote commands.
235 """Represents a mechanism to execute remote commands.
236
236
237 This is the primary interface for requesting that wire protocol commands
237 This is the primary interface for requesting that wire protocol commands
238 be executed. Instances of this interface are active in a context manager
238 be executed. Instances of this interface are active in a context manager
239 and have a well-defined lifetime. When the context manager exits, all
239 and have a well-defined lifetime. When the context manager exits, all
240 outstanding requests are waited on.
240 outstanding requests are waited on.
241 """
241 """
242
242
243 def callcommand(name, args):
243 def callcommand(name, args):
244 """Request that a named command be executed.
244 """Request that a named command be executed.
245
245
246 Receives the command name and a dictionary of command arguments.
246 Receives the command name and a dictionary of command arguments.
247
247
248 Returns a ``concurrent.futures.Future`` that will resolve to the
248 Returns a ``concurrent.futures.Future`` that will resolve to the
249 result of that command request. That exact value is left up to
249 result of that command request. That exact value is left up to
250 the implementation and possibly varies by command.
250 the implementation and possibly varies by command.
251
251
252 Not all commands can coexist with other commands in an executor
252 Not all commands can coexist with other commands in an executor
253 instance: it depends on the underlying wire protocol transport being
253 instance: it depends on the underlying wire protocol transport being
254 used and the command itself.
254 used and the command itself.
255
255
256 Implementations MAY call ``sendcommands()`` automatically if the
256 Implementations MAY call ``sendcommands()`` automatically if the
257 requested command cannot coexist with other commands in this executor.
257 requested command cannot coexist with other commands in this executor.
258
258
259 Implementations MAY call ``sendcommands()`` automatically when the
259 Implementations MAY call ``sendcommands()`` automatically when the
260 future's ``result()`` is called. So, consumers using multiple
260 future's ``result()`` is called. So, consumers using multiple
261 commands with an executor MUST ensure that ``result()`` is not called
261 commands with an executor MUST ensure that ``result()`` is not called
262 until all command requests have been issued.
262 until all command requests have been issued.
263 """
263 """
264
264
265 def sendcommands():
265 def sendcommands():
266 """Trigger submission of queued command requests.
266 """Trigger submission of queued command requests.
267
267
268 Not all transports submit commands as soon as they are requested to
268 Not all transports submit commands as soon as they are requested to
269 run. When called, this method forces queued command requests to be
269 run. When called, this method forces queued command requests to be
270 issued. It will no-op if all commands have already been sent.
270 issued. It will no-op if all commands have already been sent.
271
271
272 When called, no more new commands may be issued with this executor.
272 When called, no more new commands may be issued with this executor.
273 """
273 """
274
274
275 def close():
275 def close():
276 """Signal that this command request is finished.
276 """Signal that this command request is finished.
277
277
278 When called, no more new commands may be issued. All outstanding
278 When called, no more new commands may be issued. All outstanding
279 commands that have previously been issued are waited on before
279 commands that have previously been issued are waited on before
280 returning. This not only includes waiting for the futures to resolve,
280 returning. This not only includes waiting for the futures to resolve,
281 but also waiting for all response data to arrive. In other words,
281 but also waiting for all response data to arrive. In other words,
282 calling this waits for all on-wire state for issued command requests
282 calling this waits for all on-wire state for issued command requests
283 to finish.
283 to finish.
284
284
285 When used as a context manager, this method is called when exiting the
285 When used as a context manager, this method is called when exiting the
286 context manager.
286 context manager.
287
287
288 This method may call ``sendcommands()`` if there are buffered commands.
288 This method may call ``sendcommands()`` if there are buffered commands.
289 """
289 """
290
290
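A sketch of the lifecycle described above: commands are queued with ``callcommand()``, submission is forced with ``sendcommands()``, and only then are the futures resolved (command names and arguments are illustrative):

    # Sketch: queue two commands on one executor, then resolve results.
    def fetchheadsandcaps(executor):
        fheads = executor.callcommand(b'heads', {})
        fcaps = executor.callcommand(b'capabilities', {})
        executor.sendcommands()
        return fheads.result(), fcaps.result()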
291 class ipeerrequests(interfaceutil.Interface):
291 class ipeerrequests(interfaceutil.Interface):
292 """Interface for executing commands on a peer."""
292 """Interface for executing commands on a peer."""
293
293
294 limitedarguments = interfaceutil.Attribute(
295 """True if the peer cannot receive large argument value for commands."""
296 )
297
294 def commandexecutor():
298 def commandexecutor():
295 """A context manager that resolves to an ipeercommandexecutor.
299 """A context manager that resolves to an ipeercommandexecutor.
296
300
297 The object this resolves to can be used to issue command requests
301 The object this resolves to can be used to issue command requests
298 to the peer.
302 to the peer.
299
303
300 Callers should call its ``callcommand`` method to issue command
304 Callers should call its ``callcommand`` method to issue command
301 requests.
305 requests.
302
306
303 A new executor should be obtained for each distinct set of commands
307 A new executor should be obtained for each distinct set of commands
304 (possibly just a single command) that the consumer wants to execute
308 (possibly just a single command) that the consumer wants to execute
305 as part of a single operation or round trip. This is because some
309 as part of a single operation or round trip. This is because some
306 peers are half-duplex and/or don't support persistent connections.
310 peers are half-duplex and/or don't support persistent connections.
307 e.g. in the case of HTTP peers, commands sent to an executor represent
311 e.g. in the case of HTTP peers, commands sent to an executor represent
308 a single HTTP request. While some peers may support multiple command
312 a single HTTP request. While some peers may support multiple command
309 sends over the wire per executor, consumers need to code to the least
313 sends over the wire per executor, consumers need to code to the least
310 capable peer. So it should be assumed that command executors buffer
314 capable peer. So it should be assumed that command executors buffer
311 called commands until they are told to send them and that each
315 called commands until they are told to send them and that each
312 command executor could result in a new connection or wire-level request
316 command executor could result in a new connection or wire-level request
313 being issued.
317 being issued.
314 """
318 """
315
319
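A sketch of how a consumer might combine a fresh executor per round trip with the new ``limitedarguments`` attribute introduced above; the batch sizes are made-up numbers, not values used by Mercurial:

    # Sketch: size 'known' requests according to peer.limitedarguments.
    def probeknown(peer, nodes):
        batchsize = 100 if peer.limitedarguments else 10000
        results = []
        for i in range(0, len(nodes), batchsize):
            with peer.commandexecutor() as executor:
                f = executor.callcommand(b'known',
                                         {b'nodes': nodes[i:i + batchsize]})
            results.extend(f.result())
        return results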
316 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
320 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
317 """Unified interface for peer repositories.
321 """Unified interface for peer repositories.
318
322
319 All peer instances must conform to this interface.
323 All peer instances must conform to this interface.
320 """
324 """
321
325
322 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
326 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
323 """Unified peer interface for wire protocol version 2 peers."""
327 """Unified peer interface for wire protocol version 2 peers."""
324
328
325 apidescriptor = interfaceutil.Attribute(
329 apidescriptor = interfaceutil.Attribute(
326 """Data structure holding description of server API.""")
330 """Data structure holding description of server API.""")
327
331
328 @interfaceutil.implementer(ipeerbase)
332 @interfaceutil.implementer(ipeerbase)
329 class peer(object):
333 class peer(object):
330 """Base class for peer repositories."""
334 """Base class for peer repositories."""
331
335
336 limitedarguments = False
337
332 def capable(self, name):
338 def capable(self, name):
333 caps = self.capabilities()
339 caps = self.capabilities()
334 if name in caps:
340 if name in caps:
335 return True
341 return True
336
342
337 name = '%s=' % name
343 name = '%s=' % name
338 for cap in caps:
344 for cap in caps:
339 if cap.startswith(name):
345 if cap.startswith(name):
340 return cap[len(name):]
346 return cap[len(name):]
341
347
342 return False
348 return False
343
349
344 def requirecap(self, name, purpose):
350 def requirecap(self, name, purpose):
345 if self.capable(name):
351 if self.capable(name):
346 return
352 return
347
353
348 raise error.CapabilityError(
354 raise error.CapabilityError(
349 _('cannot %s; remote repository does not support the '
355 _('cannot %s; remote repository does not support the '
350 '\'%s\' capability') % (purpose, name))
356 '\'%s\' capability') % (purpose, name))
351
357
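To illustrate the three possible return values of ``capable()``, a stub built on the base class above (the capability strings are placeholders):

    # Sketch: capable() semantics demonstrated with a stub peer.
    class _stubpeer(peer):
        def capabilities(self):
            return {'getbundle', 'branchmap=value'}

    # _stubpeer().capable('getbundle')  -> True     (boolean capability)
    # _stubpeer().capable('branchmap')  -> 'value'  (text after '=')
    # _stubpeer().capable('missing')    -> False    (not advertised)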
352 class iverifyproblem(interfaceutil.Interface):
358 class iverifyproblem(interfaceutil.Interface):
353 """Represents a problem with the integrity of the repository.
359 """Represents a problem with the integrity of the repository.
354
360
355 Instances of this interface are emitted to describe an integrity issue
361 Instances of this interface are emitted to describe an integrity issue
356 with a repository (e.g. corrupt storage, missing data, etc).
362 with a repository (e.g. corrupt storage, missing data, etc).
357
363
358 Instances are essentially messages associated with severity.
364 Instances are essentially messages associated with severity.
359 """
365 """
360 warning = interfaceutil.Attribute(
366 warning = interfaceutil.Attribute(
361 """Message indicating a non-fatal problem.""")
367 """Message indicating a non-fatal problem.""")
362
368
363 error = interfaceutil.Attribute(
369 error = interfaceutil.Attribute(
364 """Message indicating a fatal problem.""")
370 """Message indicating a fatal problem.""")
365
371
366 node = interfaceutil.Attribute(
372 node = interfaceutil.Attribute(
367 """Revision encountering the problem.
373 """Revision encountering the problem.
368
374
369 ``None`` means the problem doesn't apply to a single revision.
375 ``None`` means the problem doesn't apply to a single revision.
370 """)
376 """)
371
377
372 class irevisiondelta(interfaceutil.Interface):
378 class irevisiondelta(interfaceutil.Interface):
373 """Represents a delta between one revision and another.
379 """Represents a delta between one revision and another.
374
380
375 Instances convey enough information to allow a revision to be exchanged
381 Instances convey enough information to allow a revision to be exchanged
376 with another repository.
382 with another repository.
377
383
378 Instances represent the fulltext revision data or a delta against
384 Instances represent the fulltext revision data or a delta against
379 another revision. Therefore the ``revision`` and ``delta`` attributes
385 another revision. Therefore the ``revision`` and ``delta`` attributes
380 are mutually exclusive.
386 are mutually exclusive.
381
387
382 Typically used for changegroup generation.
388 Typically used for changegroup generation.
383 """
389 """
384
390
385 node = interfaceutil.Attribute(
391 node = interfaceutil.Attribute(
386 """20 byte node of this revision.""")
392 """20 byte node of this revision.""")
387
393
388 p1node = interfaceutil.Attribute(
394 p1node = interfaceutil.Attribute(
389 """20 byte node of 1st parent of this revision.""")
395 """20 byte node of 1st parent of this revision.""")
390
396
391 p2node = interfaceutil.Attribute(
397 p2node = interfaceutil.Attribute(
392 """20 byte node of 2nd parent of this revision.""")
398 """20 byte node of 2nd parent of this revision.""")
393
399
394 linknode = interfaceutil.Attribute(
400 linknode = interfaceutil.Attribute(
395 """20 byte node of the changelog revision this node is linked to.""")
401 """20 byte node of the changelog revision this node is linked to.""")
396
402
397 flags = interfaceutil.Attribute(
403 flags = interfaceutil.Attribute(
398 """2 bytes of integer flags that apply to this revision.
404 """2 bytes of integer flags that apply to this revision.
399
405
400 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
406 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
401 """)
407 """)
402
408
403 basenode = interfaceutil.Attribute(
409 basenode = interfaceutil.Attribute(
404 """20 byte node of the revision this data is a delta against.
410 """20 byte node of the revision this data is a delta against.
405
411
406 ``nullid`` indicates that the revision is a full revision and not
412 ``nullid`` indicates that the revision is a full revision and not
407 a delta.
413 a delta.
408 """)
414 """)
409
415
410 baserevisionsize = interfaceutil.Attribute(
416 baserevisionsize = interfaceutil.Attribute(
411 """Size of base revision this delta is against.
417 """Size of base revision this delta is against.
412
418
413 May be ``None`` if ``basenode`` is ``nullid``.
419 May be ``None`` if ``basenode`` is ``nullid``.
414 """)
420 """)
415
421
416 revision = interfaceutil.Attribute(
422 revision = interfaceutil.Attribute(
417 """Raw fulltext of revision data for this node.""")
423 """Raw fulltext of revision data for this node.""")
418
424
419 delta = interfaceutil.Attribute(
425 delta = interfaceutil.Attribute(
420 """Delta between ``basenode`` and ``node``.
426 """Delta between ``basenode`` and ``node``.
421
427
422 Stored in the bdiff delta format.
428 Stored in the bdiff delta format.
423 """)
429 """)
424
430
425 class ifilerevisionssequence(interfaceutil.Interface):
431 class ifilerevisionssequence(interfaceutil.Interface):
426 """Contains index data for all revisions of a file.
432 """Contains index data for all revisions of a file.
427
433
428 Types implementing this behave like lists of tuples. The index
434 Types implementing this behave like lists of tuples. The index
429 in the list corresponds to the revision number. The values contain
435 in the list corresponds to the revision number. The values contain
430 index metadata.
436 index metadata.
431
437
432 The *null* revision (revision number -1) is always the last item
438 The *null* revision (revision number -1) is always the last item
433 in the index.
439 in the index.
434 """
440 """
435
441
436 def __len__():
442 def __len__():
437 """The total number of revisions."""
443 """The total number of revisions."""
438
444
439 def __getitem__(rev):
445 def __getitem__(rev):
440 """Returns the object having a specific revision number.
446 """Returns the object having a specific revision number.
441
447
442 Returns an 8-tuple with the following fields:
448 Returns an 8-tuple with the following fields:
443
449
444 offset+flags
450 offset+flags
445 Contains the offset and flags for the revision. 64-bit unsigned
451 Contains the offset and flags for the revision. 64-bit unsigned
446 integer where the first 6 bytes are the offset and the next 2 bytes
452 integer where the first 6 bytes are the offset and the next 2 bytes
447 are flags. The offset can be 0 if it is not used by the store.
453 are flags. The offset can be 0 if it is not used by the store.
448 compressed size
454 compressed size
449 Size of the revision data in the store. It can be 0 if it isn't
455 Size of the revision data in the store. It can be 0 if it isn't
450 needed by the store.
456 needed by the store.
451 uncompressed size
457 uncompressed size
452 Fulltext size. It can be 0 if it isn't needed by the store.
458 Fulltext size. It can be 0 if it isn't needed by the store.
453 base revision
459 base revision
454 Revision number of revision the delta for storage is encoded
460 Revision number of revision the delta for storage is encoded
455 against. -1 indicates not encoded against a base revision.
461 against. -1 indicates not encoded against a base revision.
456 link revision
462 link revision
457 Revision number of changelog revision this entry is related to.
463 Revision number of changelog revision this entry is related to.
458 p1 revision
464 p1 revision
459 Revision number of 1st parent. -1 if no 1st parent.
465 Revision number of 1st parent. -1 if no 1st parent.
460 p2 revision
466 p2 revision
461 Revision number of 2nd parent. -1 if no 2nd parent.
467 Revision number of 2nd parent. -1 if no 2nd parent.
462 node
468 node
463 Binary node value for this revision number.
469 Binary node value for this revision number.
464
470
465 Negative values should index off the end of the sequence. ``-1``
471 Negative values should index off the end of the sequence. ``-1``
466 should return the null revision. ``-2`` should return the most
472 should return the null revision. ``-2`` should return the most
467 recent revision.
473 recent revision.
468 """
474 """
469
475
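For example, a consumer of such a sequence might unpack one entry and split the packed offset/flags field like this (a sketch; the variable names are only for readability):

    # Sketch: unpack one index entry and split offset from flags.
    def describeentry(index, rev):
        (offsetflags, compressed, uncompressed, base,
         link, p1rev, p2rev, node) = index[rev]
        offset = offsetflags >> 16      # upper 6 bytes
        flags = offsetflags & 0xffff    # lower 2 bytes
        return offset, flags, base, link, p1rev, p2rev, node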
470 def __contains__(rev):
476 def __contains__(rev):
471 """Whether a revision number exists."""
477 """Whether a revision number exists."""
472
478
473 def insert(self, i, entry):
479 def insert(self, i, entry):
474 """Add an item to the index at specific revision."""
480 """Add an item to the index at specific revision."""
475
481
476 class ifileindex(interfaceutil.Interface):
482 class ifileindex(interfaceutil.Interface):
477 """Storage interface for index data of a single file.
483 """Storage interface for index data of a single file.
478
484
479 File storage data is divided into index metadata and data storage.
485 File storage data is divided into index metadata and data storage.
480 This interface defines the index portion of the interface.
486 This interface defines the index portion of the interface.
481
487
482 The index logically consists of:
488 The index logically consists of:
483
489
484 * A mapping between revision numbers and nodes.
490 * A mapping between revision numbers and nodes.
485 * DAG data (storing and querying the relationship between nodes).
491 * DAG data (storing and querying the relationship between nodes).
486 * Metadata to facilitate storage.
492 * Metadata to facilitate storage.
487 """
493 """
488 def __len__():
494 def __len__():
489 """Obtain the number of revisions stored for this file."""
495 """Obtain the number of revisions stored for this file."""
490
496
491 def __iter__():
497 def __iter__():
492 """Iterate over revision numbers for this file."""
498 """Iterate over revision numbers for this file."""
493
499
494 def hasnode(node):
500 def hasnode(node):
495 """Returns a bool indicating if a node is known to this store.
501 """Returns a bool indicating if a node is known to this store.
496
502
497 Implementations must only return True for full, binary node values:
503 Implementations must only return True for full, binary node values:
498 hex nodes, revision numbers, and partial node matches must be
504 hex nodes, revision numbers, and partial node matches must be
499 rejected.
505 rejected.
500
506
501 The null node is never present.
507 The null node is never present.
502 """
508 """
503
509
504 def revs(start=0, stop=None):
510 def revs(start=0, stop=None):
505 """Iterate over revision numbers for this file, with control."""
511 """Iterate over revision numbers for this file, with control."""
506
512
507 def parents(node):
513 def parents(node):
508 """Returns a 2-tuple of parent nodes for a revision.
514 """Returns a 2-tuple of parent nodes for a revision.
509
515
510 Values will be ``nullid`` if the parent is empty.
516 Values will be ``nullid`` if the parent is empty.
511 """
517 """
512
518
513 def parentrevs(rev):
519 def parentrevs(rev):
514 """Like parents() but operates on revision numbers."""
520 """Like parents() but operates on revision numbers."""
515
521
516 def rev(node):
522 def rev(node):
517 """Obtain the revision number given a node.
523 """Obtain the revision number given a node.
518
524
519 Raises ``error.LookupError`` if the node is not known.
525 Raises ``error.LookupError`` if the node is not known.
520 """
526 """
521
527
522 def node(rev):
528 def node(rev):
523 """Obtain the node value given a revision number.
529 """Obtain the node value given a revision number.
524
530
525 Raises ``IndexError`` if the node is not known.
531 Raises ``IndexError`` if the node is not known.
526 """
532 """
527
533
528 def lookup(node):
534 def lookup(node):
529 """Attempt to resolve a value to a node.
535 """Attempt to resolve a value to a node.
530
536
531 Value can be a binary node, hex node, revision number, or a string
537 Value can be a binary node, hex node, revision number, or a string
532 that can be converted to an integer.
538 that can be converted to an integer.
533
539
534 Raises ``error.LookupError`` if a node could not be resolved.
540 Raises ``error.LookupError`` if a node could not be resolved.
535 """
541 """
536
542
537 def linkrev(rev):
543 def linkrev(rev):
538 """Obtain the changeset revision number a revision is linked to."""
544 """Obtain the changeset revision number a revision is linked to."""
539
545
540 def iscensored(rev):
546 def iscensored(rev):
541 """Return whether a revision's content has been censored."""
547 """Return whether a revision's content has been censored."""
542
548
543 def commonancestorsheads(node1, node2):
549 def commonancestorsheads(node1, node2):
544 """Obtain an iterable of nodes containing heads of common ancestors.
550 """Obtain an iterable of nodes containing heads of common ancestors.
545
551
546 See ``ancestor.commonancestorsheads()``.
552 See ``ancestor.commonancestorsheads()``.
547 """
553 """
548
554
549 def descendants(revs):
555 def descendants(revs):
550 """Obtain descendant revision numbers for a set of revision numbers.
556 """Obtain descendant revision numbers for a set of revision numbers.
551
557
552 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
558 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
553 """
559 """
554
560
555 def heads(start=None, stop=None):
561 def heads(start=None, stop=None):
556 """Obtain a list of nodes that are DAG heads, with control.
562 """Obtain a list of nodes that are DAG heads, with control.
557
563
558 The set of revisions examined can be limited by specifying
564 The set of revisions examined can be limited by specifying
559 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
565 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
560 iterable of nodes. DAG traversal starts at earlier revision
566 iterable of nodes. DAG traversal starts at earlier revision
561 ``start`` and iterates forward until any node in ``stop`` is
567 ``start`` and iterates forward until any node in ``stop`` is
562 encountered.
568 encountered.
563 """
569 """
564
570
565 def children(node):
571 def children(node):
566 """Obtain nodes that are children of a node.
572 """Obtain nodes that are children of a node.
567
573
568 Returns a list of nodes.
574 Returns a list of nodes.
569 """
575 """
570
576
571 class ifiledata(interfaceutil.Interface):
577 class ifiledata(interfaceutil.Interface):
572 """Storage interface for data storage of a specific file.
578 """Storage interface for data storage of a specific file.
573
579
574 This complements ``ifileindex`` and provides an interface for accessing
580 This complements ``ifileindex`` and provides an interface for accessing
575 data for a tracked file.
581 data for a tracked file.
576 """
582 """
577 def size(rev):
583 def size(rev):
578 """Obtain the fulltext size of file data.
584 """Obtain the fulltext size of file data.
579
585
580 Any metadata is excluded from size measurements.
586 Any metadata is excluded from size measurements.
581 """
587 """
582
588
583 def revision(node, raw=False):
589 def revision(node, raw=False):
584 """"Obtain fulltext data for a node.
590 """"Obtain fulltext data for a node.
585
591
586 By default, any storage transformations are applied before the data
592 By default, any storage transformations are applied before the data
587 is returned. If ``raw`` is True, non-raw storage transformations
593 is returned. If ``raw`` is True, non-raw storage transformations
588 are not applied.
594 are not applied.
589
595
590 The fulltext data may contain a header containing metadata. Most
596 The fulltext data may contain a header containing metadata. Most
591 consumers should use ``read()`` to obtain the actual file data.
597 consumers should use ``read()`` to obtain the actual file data.
592 """
598 """
593
599
594 def read(node):
600 def read(node):
595 """Resolve file fulltext data.
601 """Resolve file fulltext data.
596
602
597 This is similar to ``revision()`` except any metadata in the data
603 This is similar to ``revision()`` except any metadata in the data
598 headers is stripped.
604 headers is stripped.
599 """
605 """
600
606
601 def renamed(node):
607 def renamed(node):
602 """Obtain copy metadata for a node.
608 """Obtain copy metadata for a node.
603
609
604 Returns ``False`` if no copy metadata is stored or a 2-tuple of
610 Returns ``False`` if no copy metadata is stored or a 2-tuple of
605 (path, node) from which this revision was copied.
611 (path, node) from which this revision was copied.
606 """
612 """
607
613
608 def cmp(node, fulltext):
614 def cmp(node, fulltext):
609 """Compare fulltext to another revision.
615 """Compare fulltext to another revision.
610
616
611 Returns True if the fulltext is different from what is stored.
617 Returns True if the fulltext is different from what is stored.
612
618
613 This takes copy metadata into account.
619 This takes copy metadata into account.
614
620
615 TODO better document the copy metadata and censoring logic.
621 TODO better document the copy metadata and censoring logic.
616 """
622 """
617
623
618 def emitrevisions(nodes,
624 def emitrevisions(nodes,
619 nodesorder=None,
625 nodesorder=None,
620 revisiondata=False,
626 revisiondata=False,
621 assumehaveparentrevisions=False,
627 assumehaveparentrevisions=False,
622 deltamode=CG_DELTAMODE_STD):
628 deltamode=CG_DELTAMODE_STD):
623 """Produce ``irevisiondelta`` for revisions.
629 """Produce ``irevisiondelta`` for revisions.
624
630
625 Given an iterable of nodes, emits objects conforming to the
631 Given an iterable of nodes, emits objects conforming to the
626 ``irevisiondelta`` interface that describe revisions in storage.
632 ``irevisiondelta`` interface that describe revisions in storage.
627
633
628 This method is a generator.
634 This method is a generator.
629
635
630 The input nodes may be unordered. Implementations must ensure that a
636 The input nodes may be unordered. Implementations must ensure that a
631 node's parents are emitted before the node itself. Transitively, this
637 node's parents are emitted before the node itself. Transitively, this
632 means that a node may only be emitted once all its ancestors in
638 means that a node may only be emitted once all its ancestors in
633 ``nodes`` have also been emitted.
639 ``nodes`` have also been emitted.
634
640
635 By default, emits "index" data (the ``node``, ``p1node``, and
641 By default, emits "index" data (the ``node``, ``p1node``, and
636 ``p2node`` attributes). If ``revisiondata`` is set, revision data
642 ``p2node`` attributes). If ``revisiondata`` is set, revision data
637 will also be present on the emitted objects.
643 will also be present on the emitted objects.
638
644
639 With default argument values, implementations can choose to emit
645 With default argument values, implementations can choose to emit
640 either fulltext revision data or a delta. When emitting deltas,
646 either fulltext revision data or a delta. When emitting deltas,
641 implementations must consider whether the delta's base revision
647 implementations must consider whether the delta's base revision
642 fulltext is available to the receiver.
648 fulltext is available to the receiver.
643
649
644 The base revision fulltext is guaranteed to be available if any of
650 The base revision fulltext is guaranteed to be available if any of
645 the following are met:
651 the following are met:
646
652
647 * Its fulltext revision was emitted by this method call.
653 * Its fulltext revision was emitted by this method call.
648 * A delta for that revision was emitted by this method call.
654 * A delta for that revision was emitted by this method call.
649 * ``assumehaveparentrevisions`` is True and the base revision is a
655 * ``assumehaveparentrevisions`` is True and the base revision is a
650 parent of the node.
656 parent of the node.
651
657
652 ``nodesorder`` can be used to control the order that revisions are
658 ``nodesorder`` can be used to control the order that revisions are
653 emitted. By default, revisions can be reordered as long as they are
659 emitted. By default, revisions can be reordered as long as they are
654 in DAG topological order (see above). If the value is ``nodes``,
660 in DAG topological order (see above). If the value is ``nodes``,
655 the iteration order from ``nodes`` should be used. If the value is
661 the iteration order from ``nodes`` should be used. If the value is
656 ``storage``, then the native order from the backing storage layer
662 ``storage``, then the native order from the backing storage layer
657 is used. (Not all storage layers will have strong ordering and behavior
663 is used. (Not all storage layers will have strong ordering and behavior
658 of this mode is storage-dependent.) ``nodes`` ordering can force
664 of this mode is storage-dependent.) ``nodes`` ordering can force
659 revisions to be emitted before their ancestors, so consumers should
665 revisions to be emitted before their ancestors, so consumers should
660 use it with care.
666 use it with care.
661
667
662 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
668 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
663 be set and it is the caller's responsibility to resolve it, if needed.
669 be set and it is the caller's responsibility to resolve it, if needed.
664
670
665 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
671 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
666 all revision data should be emitted as deltas against the revision
672 all revision data should be emitted as deltas against the revision
667 emitted just prior. The initial revision should be a delta against its
673 emitted just prior. The initial revision should be a delta against its
668 1st parent.
674 1st parent.
669 """
675 """
670
676
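A sketch of how a consumer might drive ``emitrevisions()`` when it wants revision data, collecting either a fulltext or a (base, delta) pair per node (the argument values are one plausible combination, not a recommendation):

    # Sketch: consume emitrevisions() output for a set of nodes.
    def collectrevisiondata(store, nodes):
        out = []
        for rev in store.emitrevisions(nodes, revisiondata=True):
            if rev.revision is not None:
                out.append((rev.node, None, rev.revision))
            else:
                out.append((rev.node, rev.basenode, rev.delta))
        return out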
671 class ifilemutation(interfaceutil.Interface):
677 class ifilemutation(interfaceutil.Interface):
672 """Storage interface for mutation events of a tracked file."""
678 """Storage interface for mutation events of a tracked file."""
673
679
674 def add(filedata, meta, transaction, linkrev, p1, p2):
680 def add(filedata, meta, transaction, linkrev, p1, p2):
675 """Add a new revision to the store.
681 """Add a new revision to the store.
676
682
677 Takes file data, dictionary of metadata, a transaction, linkrev,
683 Takes file data, dictionary of metadata, a transaction, linkrev,
678 and parent nodes.
684 and parent nodes.
679
685
680 Returns the node that was added.
686 Returns the node that was added.
681
687
682 May no-op if a revision matching the supplied data is already stored.
688 May no-op if a revision matching the supplied data is already stored.
683 """
689 """
684
690
685 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
691 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
686 flags=0, cachedelta=None):
692 flags=0, cachedelta=None):
687 """Add a new revision to the store.
693 """Add a new revision to the store.
688
694
689 This is similar to ``add()`` except it operates at a lower level.
695 This is similar to ``add()`` except it operates at a lower level.
690
696
691 The data passed in already contains a metadata header, if any.
697 The data passed in already contains a metadata header, if any.
692
698
693 ``node`` and ``flags`` can be used to define the expected node and
699 ``node`` and ``flags`` can be used to define the expected node and
694 the flags to use with storage. ``flags`` is a bitwise value composed
700 the flags to use with storage. ``flags`` is a bitwise value composed
695 of the various ``REVISION_FLAG_*`` constants.
701 of the various ``REVISION_FLAG_*`` constants.
696
702
697 ``add()`` is usually called when adding files from e.g. the working
703 ``add()`` is usually called when adding files from e.g. the working
698 directory. ``addrevision()`` is often called by ``add()`` and for
704 directory. ``addrevision()`` is often called by ``add()`` and for
699 scenarios where revision data has already been computed, such as when
705 scenarios where revision data has already been computed, such as when
700 applying raw data from a peer repo.
706 applying raw data from a peer repo.
701 """
707 """
702
708
703 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
709 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
704 maybemissingparents=False):
710 maybemissingparents=False):
705 """Process a series of deltas for storage.
711 """Process a series of deltas for storage.
706
712
707 ``deltas`` is an iterable of 7-tuples of
713 ``deltas`` is an iterable of 7-tuples of
708 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
714 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
709 to add.
715 to add.
710
716
711 The ``delta`` field contains ``mpatch`` data to apply to a base
717 The ``delta`` field contains ``mpatch`` data to apply to a base
712 revision, identified by ``deltabase``. The base node can be
718 revision, identified by ``deltabase``. The base node can be
713 ``nullid``, in which case the header from the delta can be ignored
719 ``nullid``, in which case the header from the delta can be ignored
714 and the delta used as the fulltext.
720 and the delta used as the fulltext.
715
721
716 ``addrevisioncb`` should be called for each node as it is committed.
722 ``addrevisioncb`` should be called for each node as it is committed.
717
723
718 ``maybemissingparents`` is a bool indicating whether the incoming
724 ``maybemissingparents`` is a bool indicating whether the incoming
719 data may reference parents/ancestor revisions that aren't present.
725 data may reference parents/ancestor revisions that aren't present.
720 This flag is set when receiving data into a "shallow" store that
726 This flag is set when receiving data into a "shallow" store that
721 doesn't hold all history.
727 doesn't hold all history.
722
728
723 Returns a list of nodes that were processed. A node will be in the list
729 Returns a list of nodes that were processed. A node will be in the list
724 even if it existed in the store previously.
730 even if it existed in the store previously.
725 """
731 """
726
732
727 def censorrevision(tr, node, tombstone=b''):
733 def censorrevision(tr, node, tombstone=b''):
728 """Remove the content of a single revision.
734 """Remove the content of a single revision.
729
735
730 The specified ``node`` will have its content purged from storage.
736 The specified ``node`` will have its content purged from storage.
731 Future attempts to access the revision data for this node will
737 Future attempts to access the revision data for this node will
732 result in failure.
738 result in failure.
733
739
734 A ``tombstone`` message can optionally be stored. This message may be
740 A ``tombstone`` message can optionally be stored. This message may be
735 displayed to users when they attempt to access the missing revision
741 displayed to users when they attempt to access the missing revision
736 data.
742 data.
737
743
738 Storage backends may have stored deltas against the previous content
744 Storage backends may have stored deltas against the previous content
739 in this revision. As part of censoring a revision, these storage
745 in this revision. As part of censoring a revision, these storage
740 backends are expected to rewrite any internally stored deltas such
746 backends are expected to rewrite any internally stored deltas such
741 that they no longer reference the deleted content.
747 that they no longer reference the deleted content.
742 """
748 """
743
749
744 def getstrippoint(minlink):
750 def getstrippoint(minlink):
745 """Find the minimum revision that must be stripped to strip a linkrev.
751 """Find the minimum revision that must be stripped to strip a linkrev.
746
752
747 Returns a 2-tuple containing the minimum revision number and a set
753 Returns a 2-tuple containing the minimum revision number and a set
748 of all revision numbers that would be broken by this strip.
754 of all revision numbers that would be broken by this strip.
749
755
750 TODO this is highly revlog centric and should be abstracted into
756 TODO this is highly revlog centric and should be abstracted into
751 a higher-level deletion API. ``repair.strip()`` relies on this.
757 a higher-level deletion API. ``repair.strip()`` relies on this.
752 """
758 """
753
759
754 def strip(minlink, transaction):
760 def strip(minlink, transaction):
755 """Remove storage of items starting at a linkrev.
761 """Remove storage of items starting at a linkrev.
756
762
757 This uses ``getstrippoint()`` to determine the first node to remove.
763 This uses ``getstrippoint()`` to determine the first node to remove.
758 Then it effectively truncates storage for all revisions after that.
764 Then it effectively truncates storage for all revisions after that.
759
765
760 TODO this is highly revlog centric and should be abstracted into a
766 TODO this is highly revlog centric and should be abstracted into a
761 higher-level deletion API.
767 higher-level deletion API.
762 """
768 """
763
769
764 class ifilestorage(ifileindex, ifiledata, ifilemutation):
770 class ifilestorage(ifileindex, ifiledata, ifilemutation):
765 """Complete storage interface for a single tracked file."""
771 """Complete storage interface for a single tracked file."""
766
772
767 def files():
773 def files():
768 """Obtain paths that are backing storage for this file.
774 """Obtain paths that are backing storage for this file.
769
775
770 TODO this is used heavily by verify code and there should probably
776 TODO this is used heavily by verify code and there should probably
771 be a better API for that.
777 be a better API for that.
772 """
778 """
773
779
774 def storageinfo(exclusivefiles=False, sharedfiles=False,
780 def storageinfo(exclusivefiles=False, sharedfiles=False,
775 revisionscount=False, trackedsize=False,
781 revisionscount=False, trackedsize=False,
776 storedsize=False):
782 storedsize=False):
777 """Obtain information about storage for this file's data.
783 """Obtain information about storage for this file's data.
778
784
779 Returns a dict describing storage for this tracked path. The keys
785 Returns a dict describing storage for this tracked path. The keys
780 in the dict map to arguments of the same. The arguments are bools
786 in the dict map to arguments of the same. The arguments are bools
781 indicating whether to calculate and obtain that data.
787 indicating whether to calculate and obtain that data.
782
788
783 exclusivefiles
789 exclusivefiles
784 Iterable of (vfs, path) describing files that are exclusively
790 Iterable of (vfs, path) describing files that are exclusively
785 used to back storage for this tracked path.
791 used to back storage for this tracked path.
786
792
787 sharedfiles
793 sharedfiles
788 Iterable of (vfs, path) describing files that are used to back
794 Iterable of (vfs, path) describing files that are used to back
789 storage for this tracked path. Those files may also provide storage
795 storage for this tracked path. Those files may also provide storage
790 for other stored entities.
796 for other stored entities.
791
797
792 revisionscount
798 revisionscount
793 Number of revisions available for retrieval.
799 Number of revisions available for retrieval.
794
800
795 trackedsize
801 trackedsize
796 Total size in bytes of all tracked revisions. This is a sum of the
802 Total size in bytes of all tracked revisions. This is a sum of the
797 length of the fulltext of all revisions.
803 length of the fulltext of all revisions.
798
804
799 storedsize
805 storedsize
800 Total size in bytes used to store data for all tracked revisions.
806 Total size in bytes used to store data for all tracked revisions.
801 This is commonly less than ``trackedsize`` due to internal usage
807 This is commonly less than ``trackedsize`` due to internal usage
802 of deltas rather than fulltext revisions.
808 of deltas rather than fulltext revisions.
803
809
804 Not all storage backends may support all queries are have a reasonable
810 Not all storage backends may support all queries are have a reasonable
805 value to use. In that case, the value should be set to ``None`` and
811 value to use. In that case, the value should be set to ``None`` and
806 callers are expected to handle this special value.
812 callers are expected to handle this special value.
807 """
813 """
808
814
809 def verifyintegrity(state):
815 def verifyintegrity(state):
810 """Verifies the integrity of file storage.
816 """Verifies the integrity of file storage.
811
817
812 ``state`` is a dict holding state of the verifier process. It can be
818 ``state`` is a dict holding state of the verifier process. It can be
813 used to communicate data between invocations of multiple storage
819 used to communicate data between invocations of multiple storage
814 primitives.
820 primitives.
815
821
816 If individual revisions cannot have their revision content resolved,
822 If individual revisions cannot have their revision content resolved,
817 the method is expected to set the ``skipread`` key to a set of nodes
823 the method is expected to set the ``skipread`` key to a set of nodes
818 that encountered problems.
824 that encountered problems.
819
825
820 The method yields objects conforming to the ``iverifyproblem``
826 The method yields objects conforming to the ``iverifyproblem``
821 interface.
827 interface.
822 """
828 """
823
829
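# The ``storageinfo()`` contract above is easiest to see from the caller's
# side. The sketch below is illustrative only and is not part of the
# interface definitions: ``repo`` is assumed to provide ``file()`` as
# described by ``ilocalrepositoryfilestorage`` later in this module, and the
# helper name is hypothetical.
def _examplefilestorageinfo(repo, path):
    fl = repo.file(path)  # object conforming to ``ifilestorage``
    info = fl.storageinfo(revisionscount=True, trackedsize=True)
    # Keys mirror the argument names; a backend that cannot answer a query
    # reports ``None``, which callers are expected to tolerate.
    count = info.get('revisionscount')
    if count is None:
        count = len(fl)
    return count, info.get('trackedsize')
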
class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""

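# To make the ``idirs`` contract above concrete, here is a minimal
# pure-Python sketch of a conforming structure (illustrative only; the class
# name is hypothetical and real implementations are more efficient). It
# counts how many tracked paths live under each directory so a directory
# disappears once its last path is removed.
class _exampledirs(object):
    def __init__(self):
        self._dirs = {}

    @staticmethod
    def _parentdirs(path):
        # b'a/b/c' contributes the directories b'a/b', b'a' and b'' (root).
        pos = path.rfind(b'/')
        while pos != -1:
            yield path[:pos]
            pos = path.rfind(b'/', 0, pos)
        yield b''

    def addpath(self, path):
        for d in self._parentdirs(path):
            self._dirs[d] = self._dirs.get(d, 0) + 1

    def delpath(self, path):
        for d in self._parentdirs(path):
            self._dirs[d] -= 1
            if not self._dirs[d]:
                del self._dirs[d]

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs
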
class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def matches(match):
        """Generate a new manifest filtered through a matcher.

        Returns an object conforming to the ``imanifestdict`` interface.
        """

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        This is equivalent to ``self.matches(match).iterkeys()`` except a new
        manifest object is not created.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path, default=''):
        """Return the flags value for a path or a default value if missing."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.
        """

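# A short illustration of the ``diff()`` result format documented above
# (illustrative only; ``m1`` and ``m2`` are assumed to be objects conforming
# to ``imanifestdict`` and the helper name is hypothetical). Treating a
# ``None`` node as "absent on that side" is an assumption of this sketch.
def _examplemanifestdiff(m1, m2):
    added, removed, modified = [], [], []
    for path, ((node1, flag1), (node2, flag2)) in sorted(m1.diff(m2).items()):
        if node1 is None:
            added.append(path)      # only present in the other manifest
        elif node2 is None:
            removed.append(path)    # only present in this manifest
        else:
            modified.append(path)   # present in both, but node/flags differ
    return added, removed, modified
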
class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def new():
        """Obtain a new manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """

class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest representing
        those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """

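# The difference between ``read()`` and ``readdelta()`` above matters mostly
# for performance. The sketch below is illustrative only (``mfctx`` is
# assumed to conform to ``imanifestrevisionstored``; the helper name is
# hypothetical): it lists the paths whose entries differ from the first
# parent without materialising the full manifest.
def _examplechangedpaths(mfctx):
    delta = mfctx.readdelta()   # an ``imanifestdict`` limited to changes
    return sorted(delta.keys())
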
class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to provide
        the full manifest in the future for any directories written (these
        manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """

class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """)

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    indexfile = interfaceutil.Attribute(
        """Path of revlog index file.

        TODO this is revlog specific and should not be exposed.
        """)

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """)

    version = interfaceutil.Attribute(
        """Revlog version number.

        TODO this is revlog specific and should not be exposed.
        """)

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """)

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """)

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None, raw=False):
        """Obtain fulltext data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisions(nodes,
                      nodesorder=None,
                      revisiondata=False,
                      assumehaveparentrevisions=False):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.revision(node, raw=True))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """

    def storageinfo(exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """

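# A small illustration of the node/revision mapping methods documented above
# (illustrative only; ``store`` is assumed to conform to ``imanifeststorage``
# and the helper name is hypothetical).
def _examplemanifestindex(store):
    entries = []
    for rev in store:                  # ``__iter__`` yields revision numbers
        node = store.node(rev)         # raises error.LookupError if unknown
        p1, p2 = store.parents(node)   # ``nullid`` marks an absent parent
        entries.append((rev, node, p1, p2, store.linkrev(rev)))
    return entries
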
class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

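# Tying the pieces together: resolving a manifest through an object
# conforming to ``imanifestlog`` (illustrative only; ``repo.manifestlog``
# matches the ``manifestlog`` attribute described further down and the
# helper name is hypothetical).
def _examplereadmanifest(repo, node):
    mfl = repo.manifestlog
    # Root manifest for ``node``; equivalent to mfl.get(b'', node).read().
    rootmf = mfl[node].read()
    # Storage for the root manifest; a non-empty ``tree`` would address a
    # subdirectory when tree manifests are in use.
    storage = mfl.getstorage(b'')
    return rootmf, len(storage)
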
class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """

class ilocalrepositorymain(interfaceutil.Interface):
    """Main interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    supportedformats = interfaceutil.Attribute(
        """Set of requirements that apply to stream clone.

        This is actually a class attribute and is shared among all instances.
        """)

    supported = interfaceutil.Attribute(
        """Set of requirements that this repo is capable of opening.""")

    requirements = interfaceutil.Attribute(
        """Set of requirements this repo uses.""")

    features = interfaceutil.Attribute(
        """Set of "features" this repository supports.

        A "feature" is a loosely-defined term. It can refer to a feature
        in the classical sense or can describe an implementation detail
        of the repository. For example, a ``readonly`` feature may denote
        the repository as read-only. Or a ``revlogfilestore`` feature may
        denote that the repository is using revlogs for file storage.

        The intent of features is to provide a machine-queryable mechanism
        for repo consumers to test for various repository characteristics.

        Features are similar to ``requirements``. The main difference is that
        requirements are stored on-disk and represent requirements to open the
        repository. Features are more run-time capabilities of the repository
        and more granular capabilities (which may be derived from requirements).
        """)

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo.""")

    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory.""")

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """)

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """)

    root = interfaceutil.Attribute(
        """Path to the root of the working directory.""")

    path = interfaceutil.Attribute(
        """Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo.""")

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """)

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """)

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor.""")

    ui = interfaceutil.Attribute(
        """Main ui instance for this instance.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from.""")

    store = interfaceutil.Attribute(
        """A store instance.""")

    spath = interfaceutil.Attribute(
        """Path to the store.""")

    sjoin = interfaceutil.Attribute(
        """Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """)

    wcachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory dedicated to the working copy.

        Typically .hg/wcache.
        """)

    filteredrevcache = interfaceutil.Attribute(
        """Holds sets of revisions to be filtered.""")

    names = interfaceutil.Attribute(
        """A ``namespaces`` instance.""")

    def close():
        """Close the handle on this repository."""

    def peer():
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = interfaceutil.Attribute(
        """A store of obsolescence data.""")

    changelog = interfaceutil.Attribute(
        """A handle on the changelog revlog.""")

    manifestlog = interfaceutil.Attribute(
        """An instance conforming to the ``imanifestlog`` interface.

        Provides access to manifests for the repository.
        """)

    dirstate = interfaceutil.Attribute(
        """Working directory state.""")

    narrowpats = interfaceutil.Attribute(
        """Matcher patterns for this repository's narrowspec.""")

    def narrowmatch(match=None, includeexact=False):
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        pass

    def branchtip(branchtip, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        pass

    def invalidateall():
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""

    def checkcommitpatterns(wctx, vdirs, match, status, fail):
        pass

    def commit(text='', user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the repository."""

    def commitctx(ctx, error=False):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(node1='.', node2=None, match=None, ignored=False,
               clean=False, unknown=False, listsubrepos=False):
        """Convenience method to call repo[x].status()."""

    def addpostdsstatus(ps):
        pass

    def postdsstatus():
        pass

    def clearpostdsstatus():
        pass

    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    def branchheads(branch=None, start=None, closed=False):
        pass

    def branches(nodes):
        pass

    def between(pairs):
        pass

    def checkpush(pushop):
        pass

    prepushoutgoinghooks = interfaceutil.Attribute(
        """util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        pass

    def listkeys(namespace):
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        pass

    def savecommitmessage(text):
        pass

1705 class completelocalrepository(ilocalrepositorymain,
1711 class completelocalrepository(ilocalrepositorymain,
1706 ilocalrepositoryfilestorage):
1712 ilocalrepositoryfilestorage):
1707 """Complete interface for a local repository."""
1713 """Complete interface for a local repository."""
1708
1714
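# A minimal usage sketch (illustration only, not part of the interface
# definitions in this file): code that mutates a repository conforming to
# ``ilocalrepositorymain`` is expected to take the working-copy lock
# (``wlock``) before the store lock (``lock``) and to release them in the
# reverse order. The helper name below is hypothetical.
def _commitall(repo, message, user=None):
    """Commit outstanding working directory changes on ``repo``."""
    wlock = repo.wlock()
    try:
        lock = repo.lock()
        try:
            # commit() typically returns the node of the new revision, or
            # None if there was nothing to commit.
            return repo.commit(text=message, user=user)
        finally:
            lock.release()
    finally:
        wlock.release()
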
class iwireprotocolcommandcacher(interfaceutil.Interface):
    """Represents a caching backend for wire protocol commands.

    Wire protocol version 2 supports transparent caching of many commands.
    To leverage this caching, servers can activate objects that cache
    command responses. Objects handle both cache writing and reading.
    This interface defines how that response caching mechanism works.

    Wire protocol version 2 commands emit a series of objects that are
    serialized and sent to the client. The caching layer exists between
    the invocation of the command function and the sending of its output
    objects to an output layer.

    Instances of this interface represent a binding to a cache that
    can serve a response (in place of calling a command function) and/or
    write responses to a cache for subsequent use.

    When a command request arrives, the following happens with regards
    to this interface:

    1. The server determines whether the command request is cacheable.
    2. If it is, an instance of this interface is spawned.
    3. The cacher is activated in a context manager (``__enter__`` is called).
    4. A cache *key* for that request is derived. This will call the
       instance's ``adjustcachekeystate()`` method so the derivation
       can be influenced.
    5. The cacher is informed of the derived cache key via a call to
       ``setcachekey()``.
    6. The cacher's ``lookup()`` method is called to test for presence of
       the derived key in the cache.
    7. If ``lookup()`` returns a hit, that cached result is used in place
       of invoking the command function. ``__exit__`` is called and the instance
       is discarded.
    8. The command function is invoked.
    9. ``onobject()`` is called for each object emitted by the command
       function.
    10. After the final object is seen, ``onfinished()`` is called.
    11. ``__exit__`` is called to signal the end of use of the instance.

    Cache *key* derivation can be influenced by the instance.

    Cache keys are initially derived by a deterministic representation of
    the command request. This includes the command name, arguments, protocol
    version, etc. This initial key derivation is performed by CBOR-encoding a
    data structure and feeding that output into a hasher.

    Instances of this interface can influence this initial key derivation
    via ``adjustcachekeystate()``.

    The instance is informed of the derived cache key via a call to
    ``setcachekey()``. The instance must store the key locally so it can
    be consulted on subsequent operations that may require it.

    When constructed, the instance has access to a callable that can be used
    for encoding response objects. This callable receives as its single
    argument an object emitted by a command function. It returns an iterable
    of bytes chunks representing the encoded object. Unless the cacher is
    caching native Python objects in memory or has a way of reconstructing
    the original Python objects, implementations typically call this function
    to produce bytes from the output objects and then store those bytes in
    the cache. When it comes time to re-emit those bytes, they are wrapped
    in a ``wireprototypes.encodedresponse`` instance to tell the output
    layer that they are pre-encoded.

    When receiving the objects emitted by the command function, instances
    can choose what to do with those objects. The simplest thing to do is
    re-emit the original objects. They will be forwarded to the output
    layer and will be processed as if the cacher did not exist.

    Implementations could also choose to not emit objects - instead locally
    buffering objects or their encoded representation. They could then emit
    a single "coalesced" object when ``onfinished()`` is called. In
    this way, the implementation would function as a filtering layer of
    sorts.

    When caching objects, typically the encoded form of the object will
    be stored. Keep in mind that if the original object is forwarded to
    the output layer, it will need to be encoded there as well. For large
    output, this redundant encoding could add overhead. Implementations
    could wrap the encoded object data in ``wireprototypes.encodedresponse``
    instances to avoid this overhead.
    """
    def __enter__():
        """Marks the instance as active.

        Should return self.
        """

    def __exit__(exctype, excvalue, exctb):
        """Called when the cacher is no longer used.

        This can be used by implementations to perform cleanup actions (e.g.
        disconnecting network sockets, aborting a partially cached response).
        """

    def adjustcachekeystate(state):
        """Influences cache key derivation by adjusting state to derive key.

        A dict defining the state used to derive the cache key is passed.

        Implementations can modify this dict to record additional state that
        is wanted to influence key derivation.

        Implementations are *highly* encouraged to not modify or delete
        existing keys.
        """

    def setcachekey(key):
        """Record the derived cache key for this request.

        Instances may mutate the key for internal usage, as desired. e.g.
        instances may wish to prepend the repo name, introduce path
        components for filesystem or URL addressing, etc. Behavior is up to
        the cache.

        Returns a bool indicating if the request is cacheable by this
        instance.
        """

    def lookup():
        """Attempt to resolve an entry in the cache.

        The instance is instructed to look for the cache key that it was
        informed about via the call to ``setcachekey()``.

        If there's no cache hit or the cacher doesn't wish to use the cached
        entry, ``None`` should be returned.

        Else, a dict defining the cached result should be returned. The
        dict may have the following keys:

        objs
           An iterable of objects that should be sent to the client. That
           iterable of objects is expected to be what the command function
           would return if invoked or an equivalent representation thereof.
        """

    def onobject(obj):
        """Called when a new object is emitted from the command function.

        Receives as its argument the object that was emitted from the
        command function.

        This method returns an iterator of objects to forward to the output
        layer. The easiest implementation is a generator that just
        ``yield obj``.
        """

    def onfinished():
        """Called after all objects have been emitted from the command function.

        Implementations should return an iterator of objects to forward to
        the output layer.

        This method can be a generator.
        """
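
# A minimal in-memory cacher sketch (illustration only, not part of this
# file) showing one way the lifecycle described by
# ``iwireprotocolcommandcacher`` could be implemented. The class name and the
# module-level ``_memorycache`` dict are hypothetical; real cachers would
# usually store the *encoded* response bytes rather than raw objects. Note
# that the interface declarations above omit ``self`` per ``interfaceutil``
# conventions, while a concrete class includes it.
_memorycache = {}

class memorycommandcacher(object):
    def __init__(self):
        self.key = None
        self._pending = []

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        # Discard partially collected output if the command raised.
        if exctype is not None:
            self._pending = []

    def adjustcachekeystate(self, state):
        # No additional state to mix into key derivation; existing keys are
        # left untouched, as recommended above.
        pass

    def setcachekey(self, key):
        self.key = key
        return True

    def lookup(self):
        if self.key in _memorycache:
            # Serve the cached objects in place of invoking the command.
            return {'objs': list(_memorycache[self.key])}
        return None

    def onobject(self, obj):
        # Remember the object for the cache and forward it unchanged.
        self._pending.append(obj)
        yield obj

    def onfinished(self):
        _memorycache[self.key] = self._pending
        return []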