httppeer: use %s for formatting...
Gregory Szorc
r41345:a9b609fb default
@@ -1,1006 +1,1006 @@
# httppeer.py - HTTP repository proxy classes for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import io
import os
import socket
import struct
import weakref

from .i18n import _
from . import (
    bundle2,
    error,
    httpconnection,
    pycompat,
    repository,
    statichttprepo,
    url as urlmod,
    util,
    wireprotoframing,
    wireprototypes,
    wireprotov1peer,
    wireprotov2peer,
    wireprotov2server,
)
from .utils import (
    cborutil,
    interfaceutil,
    stringutil,
)

httplib = util.httplib
urlerr = util.urlerr
urlreq = util.urlreq

def encodevalueinheaders(value, header, limit):
    """Encode a string value into multiple HTTP headers.

    ``value`` will be encoded into 1 or more HTTP headers with the names
    ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
    name + value will be at most ``limit`` bytes long.

    Returns an iterable of 2-tuples consisting of header names and
    values as native strings.
    """
    # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
    # not bytes. This function always takes bytes in as arguments.
    fmt = pycompat.strurl(header) + r'-%s'
    # Note: it is *NOT* a bug that the last bit here is a bytestring
    # and not a unicode: we're just getting the encoded length anyway,
    # and using an r-string to make it portable between Python 2 and 3
    # doesn't work because then the \r is a literal backslash-r
    # instead of a carriage return.
    valuelen = limit - len(fmt % r'000') - len(': \r\n')
    result = []

    n = 0
    for i in pycompat.xrange(0, len(value), valuelen):
        n += 1
        result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))

    return result

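# Illustrative sketch (not part of the original module): how a long encoded
# argument string ends up spread across numbered headers. The helper below is
# hypothetical and never called; the sample value and the 25-byte limit are
# made up for the example.
def _examplesplitheaders():
    headers = {}
    encoded = b'namespace=phases'
    for name, value in encodevalueinheaders(encoded, b'X-HgArg', 25):
        # With a 25-byte limit this yields ('X-HgArg-1', 'namespace='),
        # ('X-HgArg-2', 'phases') as native strings.
        headers[name] = value
    return headers
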
class _multifile(object):
    def __init__(self, *fileobjs):
        for f in fileobjs:
            if not util.safehasattr(f, 'length'):
                raise ValueError(
                    '_multifile only supports file objects that '
                    'have a length but this one does not:', type(f), f)
        self._fileobjs = fileobjs
        self._index = 0

    @property
    def length(self):
        return sum(f.length for f in self._fileobjs)

    def read(self, amt=None):
        if amt <= 0:
            return ''.join(f.read() for f in self._fileobjs)
        parts = []
        while amt and self._index < len(self._fileobjs):
            parts.append(self._fileobjs[self._index].read(amt))
            got = len(parts[-1])
            if got < amt:
                self._index += 1
            amt -= got
        return ''.join(parts)

    def seek(self, offset, whence=os.SEEK_SET):
        if whence != os.SEEK_SET:
            raise NotImplementedError(
                '_multifile does not support anything other'
                ' than os.SEEK_SET for whence on seek()')
        if offset != 0:
            raise NotImplementedError(
                '_multifile only supports seeking to start, but that '
                'could be fixed if you need it')
        for f in self._fileobjs:
            f.seek(0)
        self._index = 0

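# Illustrative sketch (not part of the original module): _multifile is what
# makev1commandrequest() uses to concatenate the urlencoded arguments and the
# raw body for 'httppostargs' uploads. The helper below is hypothetical and
# never called; note the manual .length attribute, which _multifile requires.
def _examplemultifile():
    args = io.BytesIO(b'cmd-arguments')
    args.length = 13
    body = io.BytesIO(b'bundle-payload')
    body.length = 14
    combined = _multifile(args, body)
    assert combined.length == 27
    # Reads drain the wrapped files in order: first 8 bytes, then the rest.
    return combined.read(8), combined.read(combined.length)
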
def makev1commandrequest(ui, requestbuilder, caps, capablefn,
                         repobaseurl, cmd, args):
    """Make an HTTP request to run a command for a version 1 client.

    ``caps`` is a set of known server capabilities. The value may be
    None if capabilities are not yet known.

    ``capablefn`` is a function to evaluate a capability.

    ``cmd``, ``args``, and ``data`` define the command, its arguments, and
    raw data to pass to it.
    """
    if cmd == 'pushkey':
        args['data'] = ''
    data = args.pop('data', None)
    headers = args.pop('headers', {})

    ui.debug("sending %s command\n" % cmd)
    q = [('cmd', cmd)]
    headersize = 0
    # Important: don't use self.capable() here or else you end up
    # with infinite recursion when trying to look up capabilities
    # for the first time.
    postargsok = caps is not None and 'httppostargs' in caps

    # Send arguments via POST.
    if postargsok and args:
        strargs = urlreq.urlencode(sorted(args.items()))
        if not data:
            data = strargs
        else:
            if isinstance(data, bytes):
                i = io.BytesIO(data)
                i.length = len(data)
                data = i
            argsio = io.BytesIO(strargs)
            argsio.length = len(strargs)
            data = _multifile(argsio, data)
        headers[r'X-HgArgs-Post'] = len(strargs)
    elif args:
        # Calling self.capable() can infinite loop if we are calling
        # "capabilities". But that command should never accept wire
        # protocol arguments. So this should never happen.
        assert cmd != 'capabilities'
        httpheader = capablefn('httpheader')
        if httpheader:
            headersize = int(httpheader.split(',', 1)[0])

        # Send arguments via HTTP headers.
        if headersize > 0:
            # The headers can typically carry more data than the URL.
            encargs = urlreq.urlencode(sorted(args.items()))
            for header, value in encodevalueinheaders(encargs, 'X-HgArg',
                                                      headersize):
                headers[header] = value
        # Send arguments via query string (Mercurial <1.9).
        else:
            q += sorted(args.items())

    qs = '?%s' % urlreq.urlencode(q)
    cu = "%s%s" % (repobaseurl, qs)
    size = 0
    if util.safehasattr(data, 'length'):
        size = data.length
    elif data is not None:
        size = len(data)
    if data is not None and r'Content-Type' not in headers:
        headers[r'Content-Type'] = r'application/mercurial-0.1'

    # Tell the server we accept application/mercurial-0.2 and multiple
    # compression formats if the server is capable of emitting those
    # payloads.
    # Note: Keep this set empty by default, as client advertisement of
    # protocol parameters should only occur after the handshake.
    protoparams = set()

    mediatypes = set()
    if caps is not None:
        mt = capablefn('httpmediatype')
        if mt:
            protoparams.add('0.1')
            mediatypes = set(mt.split(','))

        protoparams.add('partial-pull')

    if '0.2tx' in mediatypes:
        protoparams.add('0.2')

    if '0.2tx' in mediatypes and capablefn('compression'):
        # We /could/ compare supported compression formats and prune
        # non-mutually supported or error if nothing is mutually supported.
        # For now, send the full list to the server and have it error.
        comps = [e.wireprotosupport().name for e in
                 util.compengines.supportedwireengines(util.CLIENTROLE)]
        protoparams.add('comp=%s' % ','.join(comps))

    if protoparams:
        protoheaders = encodevalueinheaders(' '.join(sorted(protoparams)),
                                            'X-HgProto',
                                            headersize or 1024)
        for header, value in protoheaders:
            headers[header] = value

    varyheaders = []
    for header in headers:
        if header.lower().startswith(r'x-hg'):
            varyheaders.append(header)

    if varyheaders:
        headers[r'Vary'] = r','.join(sorted(varyheaders))

    req = requestbuilder(pycompat.strurl(cu), data, headers)

    if data is not None:
        ui.debug("sending %d bytes\n" % size)
        req.add_unredirected_header(r'Content-Length', r'%d' % size)

    return req, cu, qs

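# Illustrative note (not part of the original module): depending on server
# capabilities, the same command arguments travel three different ways:
#
#   * 'httppostargs' advertised: urlencoded args are prepended to the POST
#     body (via _multifile) and announced with X-HgArgs-Post.
#   * 'httpheader' advertised: args are urlencoded and spread over
#     X-HgArg-1, X-HgArg-2, ... headers, each under the advertised limit.
#   * neither (Mercurial < 1.9 servers): args are appended to the
#     ?cmd=... query string.
#
# Every X-Hg* header used is also listed in the Vary header, e.g.
#   Vary: X-HgArg-1,X-HgProto-1
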
def _reqdata(req):
    """Get request data, if any. If no data, returns None."""
    if pycompat.ispy3:
        return req.data
    if not req.has_data():
        return None
    return req.get_data()

def sendrequest(ui, opener, req):
    """Send a prepared HTTP request.

    Returns the response object.
    """
    dbg = ui.debug
    if (ui.debugflag
        and ui.configbool('devel', 'debug.peer-request')):
        line = 'devel-peer-request: %s\n'
        dbg(line % '%s %s' % (pycompat.bytesurl(req.get_method()),
                              pycompat.bytesurl(req.get_full_url())))
        hgargssize = None

        for header, value in sorted(req.header_items()):
            header = pycompat.bytesurl(header)
            value = pycompat.bytesurl(value)
            if header.startswith('X-hgarg-'):
                if hgargssize is None:
                    hgargssize = 0
                hgargssize += len(value)
            else:
                dbg(line % ' %s %s' % (header, value))

        if hgargssize is not None:
            dbg(line % ' %d bytes of commands arguments in headers'
                % hgargssize)
        data = _reqdata(req)
        if data is not None:
            length = getattr(data, 'length', None)
            if length is None:
                length = len(data)
            dbg(line % ' %d bytes of data' % length)

        start = util.timer()

    res = None
    try:
        res = opener.open(req)
    except urlerr.httperror as inst:
        if inst.code == 401:
            raise error.Abort(_('authorization failed'))
        raise
    except httplib.HTTPException as inst:
        ui.debug('http error requesting %s\n' %
                 util.hidepassword(req.get_full_url()))
        ui.traceback()
        raise IOError(None, inst)
    finally:
        if ui.debugflag and ui.configbool('devel', 'debug.peer-request'):
            code = res.code if res else -1
            dbg(line % ' finished in %.4f seconds (%d)'
                % (util.timer() - start, code))

    # Insert error handlers for common I/O failures.
    urlmod.wrapresponse(res)

    return res

class RedirectedRepoError(error.RepoError):
    def __init__(self, msg, respurl):
        super(RedirectedRepoError, self).__init__(msg)
        self.respurl = respurl

def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible,
                           allowcbor=False):
    # record the url we got redirected to
    redirected = False
    respurl = pycompat.bytesurl(resp.geturl())
    if respurl.endswith(qs):
        respurl = respurl[:-len(qs)]
        qsdropped = False
    else:
        qsdropped = True

    if baseurl.rstrip('/') != respurl.rstrip('/'):
        redirected = True
        if not ui.quiet:
            ui.warn(_('real URL is %s\n') % respurl)

    try:
        proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))
    except AttributeError:
        proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))

    safeurl = util.hidepassword(baseurl)
    if proto.startswith('application/hg-error'):
        raise error.OutOfBandError(resp.read())

    # Pre 1.0 versions of Mercurial used text/plain and
    # application/hg-changegroup. We don't support such old servers.
    if not proto.startswith('application/mercurial-'):
        ui.debug("requested URL: '%s'\n" % util.hidepassword(requrl))
        msg = _("'%s' does not appear to be an hg repository:\n"
                "---%%<--- (%s)\n%s\n---%%<---\n") % (
            safeurl, proto or 'no content-type', resp.read(1024))

        # Some servers may strip the query string from the redirect. We
        # raise a special error type so callers can react to this specially.
        if redirected and qsdropped:
            raise RedirectedRepoError(msg, respurl)
        else:
            raise error.RepoError(msg)

    try:
        subtype = proto.split('-', 1)[1]

        # Unless we end up supporting CBOR in the legacy wire protocol,
        # this should ONLY be encountered for the initial capabilities
        # request during handshake.
        if subtype == 'cbor':
            if allowcbor:
                return respurl, proto, resp
            else:
                raise error.RepoError(_('unexpected CBOR response from '
                                        'server'))

        version_info = tuple([int(n) for n in subtype.split('.')])
    except ValueError:
        raise error.RepoError(_("'%s' sent a broken Content-Type "
                                "header (%s)") % (safeurl, proto))

    # TODO consider switching to a decompression reader that uses
    # generators.
    if version_info == (0, 1):
        if compressible:
            resp = util.compengines['zlib'].decompressorreader(resp)

    elif version_info == (0, 2):
        # application/mercurial-0.2 always identifies the compression
        # engine in the payload header.
        elen = struct.unpack('B', util.readexactly(resp, 1))[0]
        ename = util.readexactly(resp, elen)
        engine = util.compengines.forwiretype(ename)

        resp = engine.decompressorreader(resp)
    else:
        raise error.RepoError(_("'%s' uses newer protocol %s") %
                              (safeurl, subtype))

    return respurl, proto, resp

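# Illustrative sketch (not part of the original module): an
# application/mercurial-0.2 body starts with a 1-byte length followed by the
# compression engine name, e.g. b'\x04zlib' or b'\x04zstd', and only then the
# (possibly compressed) payload. The helper below is hypothetical and mirrors
# the parsing done above.
def _examplereadcompressionheader(fh):
    # read the 1-byte engine-name length, then the engine name itself
    elen = struct.unpack('B', util.readexactly(fh, 1))[0]
    return util.readexactly(fh, elen)  # e.g. b'zlib'; the payload follows
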
class httppeer(wireprotov1peer.wirepeer):
    def __init__(self, ui, path, url, opener, requestbuilder, caps):
        self.ui = ui
        self._path = path
        self._url = url
        self._caps = caps
        self._urlopener = opener
        self._requestbuilder = requestbuilder

    def __del__(self):
        for h in self._urlopener.handlers:
            h.close()
            getattr(h, "close_all", lambda: None)()

    # Begin of ipeerconnection interface.

    def url(self):
        return self._path

    def local(self):
        return None

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        try:
            reqs, sent, recv = (self._urlopener.requestscount,
                                self._urlopener.sentbytescount,
                                self._urlopener.receivedbytescount)
        except AttributeError:
            return
        self.ui.note(_('(sent %d HTTP requests and %d bytes; '
                       'received %d bytes in responses)\n') %
                     (reqs, sent, recv))

    # End of ipeerconnection interface.

    # Begin of ipeercommands interface.

    def capabilities(self):
        return self._caps

    # End of ipeercommands interface.

    def _callstream(self, cmd, _compressible=False, **args):
        args = pycompat.byteskwargs(args)

        req, cu, qs = makev1commandrequest(self.ui, self._requestbuilder,
                                           self._caps, self.capable,
                                           self._url, cmd, args)

        resp = sendrequest(self.ui, self._urlopener, req)

        self._url, ct, resp = parsev1commandresponse(self.ui, self._url, cu, qs,
                                                     resp, _compressible)

        return resp

    def _call(self, cmd, **args):
        fp = self._callstream(cmd, **args)
        try:
            return fp.read()
        finally:
            # if using keepalive, allow connection to be reused
            fp.close()

    def _callpush(self, cmd, cg, **args):
        # have to stream bundle to a temp file because we do not have
        # http 1.1 chunked transfer.

        types = self.capable('unbundle')
        try:
            types = types.split(',')
        except AttributeError:
            # servers older than d1b16a746db6 will send 'unbundle' as a
            # boolean capability. They only support headerless/uncompressed
            # bundles.
            types = [""]
        for x in types:
            if x in bundle2.bundletypes:
                type = x
                break

        tempname = bundle2.writebundle(self.ui, cg, None, type)
        fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
        headers = {r'Content-Type': r'application/mercurial-0.1'}

        try:
            r = self._call(cmd, data=fp, headers=headers, **args)
            vals = r.split('\n', 1)
            if len(vals) < 2:
                raise error.ResponseError(_("unexpected response:"), r)
            return vals
        except urlerr.httperror:
            # Catch and re-raise these so we don't try and treat them
            # like generic socket errors. They lack any values in
            # .args on Python 3 which breaks our socket.error block.
            raise
        except socket.error as err:
            if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
                raise error.Abort(_('push failed: %s') % err.args[1])
            raise error.Abort(err.args[1])
        finally:
            fp.close()
            os.unlink(tempname)

    def _calltwowaystream(self, cmd, fp, **args):
        fh = None
        fp_ = None
        filename = None
        try:
            # dump bundle to disk
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
            d = fp.read(4096)
            while d:
                fh.write(d)
                d = fp.read(4096)
            fh.close()
            # start http push
            fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
            headers = {r'Content-Type': r'application/mercurial-0.1'}
            return self._callstream(cmd, data=fp_, headers=headers, **args)
        finally:
            if fp_ is not None:
                fp_.close()
            if fh is not None:
                fh.close()
                os.unlink(filename)

    def _callcompressable(self, cmd, **args):
        return self._callstream(cmd, _compressible=True, **args)

    def _abort(self, exception):
        raise exception

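# Illustrative note (not part of the original module): _callpush() relies on
# the version 1 HTTP push response putting the command's return value on the
# first line and any server output after it, hence the split('\n', 1), e.g.
#
#   '1\nadding changesets\nadding manifests\n...'
#   -> ['1', 'adding changesets\nadding manifests\n...']
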
def sendv2request(ui, opener, requestbuilder, apiurl, permission, requests,
                  redirect):
    wireprotoframing.populatestreamencoders()

    uiencoders = ui.configlist(b'experimental', b'httppeer.v2-encoder-order')

    if uiencoders:
        encoders = []

        for encoder in uiencoders:
            if encoder not in wireprotoframing.STREAM_ENCODERS:
                ui.warn(_(b'wire protocol version 2 encoder referenced in '
                          b'config (%s) is not known; ignoring\n') % encoder)
            else:
                encoders.append(encoder)

    else:
        encoders = wireprotoframing.STREAM_ENCODERS_ORDER

    reactor = wireprotoframing.clientreactor(ui,
                                             hasmultiplesend=False,
                                             buffersends=True,
                                             clientcontentencoders=encoders)

    handler = wireprotov2peer.clienthandler(ui, reactor,
                                            opener=opener,
                                            requestbuilder=requestbuilder)

    url = '%s/%s' % (apiurl, permission)

    if len(requests) > 1:
        url += '/multirequest'
    else:
        url += '/%s' % requests[0][0]

    ui.debug('sending %d commands\n' % len(requests))
    for command, args, f in requests:
        ui.debug('sending command %s: %s\n' % (
            command, stringutil.pprint(args, indent=2)))
        assert not list(handler.callcommand(command, args, f,
                                            redirect=redirect))

    # TODO stream this.
    body = b''.join(map(bytes, handler.flushcommands()))

    # TODO modify user-agent to reflect v2
    headers = {
        r'Accept': wireprotov2server.FRAMINGTYPE,
        r'Content-Type': wireprotov2server.FRAMINGTYPE,
    }

    req = requestbuilder(pycompat.strurl(url), body, headers)
    req.add_unredirected_header(r'Content-Length', r'%d' % len(body))

    try:
        res = opener.open(req)
    except urlerr.httperror as e:
        if e.code == 401:
            raise error.Abort(_('authorization failed'))

        raise
    except httplib.HTTPException as e:
        ui.traceback()
        raise IOError(None, e)

    return handler, res

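# Illustrative note (not part of the original module): the request URL is
# assembled as <apiurl>/<permission>/<command> (or .../multirequest when
# batching), so a single read-only 'heads' call goes to something like
#   https://example.com/repo/api/<HTTP_WIREPROTO_V2 token>/ro/heads
# where the token comes from wireprototypes.HTTP_WIREPROTO_V2. The content
# encoder preference can be pinned from hgrc if needed, e.g.:
#   [experimental]
#   httppeer.v2-encoder-order = identity
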
class queuedcommandfuture(pycompat.futures.Future):
    """Wraps result() on command futures to trigger submission on call."""

    def result(self, timeout=None):
        if self.done():
            return pycompat.futures.Future.result(self, timeout)

        self._peerexecutor.sendcommands()

        # sendcommands() will restore the original __class__ and self.result
        # will resolve to Future.result.
        return self.result(timeout)

@interfaceutil.implementer(repository.ipeercommandexecutor)
class httpv2executor(object):
    def __init__(self, ui, opener, requestbuilder, apiurl, descriptor,
                 redirect):
        self._ui = ui
        self._opener = opener
        self._requestbuilder = requestbuilder
        self._apiurl = apiurl
        self._descriptor = descriptor
        self._redirect = redirect
        self._sent = False
        self._closed = False
        self._neededpermissions = set()
        self._calls = []
        self._futures = weakref.WeakSet()
        self._responseexecutor = None
        self._responsef = None

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'commands are sent')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # The service advertises which commands are available. So if we attempt
        # to call an unknown command or pass an unknown argument, we can screen
        # for this.
        if command not in self._descriptor['commands']:
            raise error.ProgrammingError(
                'wire protocol command %s is not available' % command)

        cmdinfo = self._descriptor['commands'][command]
        unknownargs = set(args.keys()) - set(cmdinfo.get('args', {}))

        if unknownargs:
            raise error.ProgrammingError(
                'wire protocol command %s does not accept argument: %s' % (
                    command, ', '.join(sorted(unknownargs))))

        self._neededpermissions |= set(cmdinfo['permissions'])

        # TODO we /could/ also validate types here, since the API descriptor
        # includes types...

        f = pycompat.futures.Future()

        # Monkeypatch it so result() triggers sendcommands(), otherwise result()
        # could deadlock.
        f.__class__ = queuedcommandfuture
        f._peerexecutor = self

        self._futures.add(f)
        self._calls.append((command, args, f))

        return f

    def sendcommands(self):
        if self._sent:
            return

        if not self._calls:
            return

        self._sent = True

        # Unhack any future types so caller sees a clean type and so we
        # break reference cycle.
        for f in self._futures:
            if isinstance(f, queuedcommandfuture):
                f.__class__ = pycompat.futures.Future
                f._peerexecutor = None

        # Mark the future as running and filter out cancelled futures.
        calls = [(command, args, f)
                 for command, args, f in self._calls
                 if f.set_running_or_notify_cancel()]

        # Clear out references, prevent improper object usage.
        self._calls = None

        if not calls:
            return

        permissions = set(self._neededpermissions)

        if 'push' in permissions and 'pull' in permissions:
            permissions.remove('pull')

        if len(permissions) > 1:
            raise error.RepoError(_('cannot make request requiring multiple '
                                    'permissions: %s') %
                                  _(', ').join(sorted(permissions)))

        permission = {
            'push': 'rw',
            'pull': 'ro',
        }[permissions.pop()]

        handler, resp = sendv2request(
            self._ui, self._opener, self._requestbuilder, self._apiurl,
            permission, calls, self._redirect)

        # TODO we probably want to validate the HTTP code, media type, etc.

        self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
        self._responsef = self._responseexecutor.submit(self._handleresponse,
                                                        handler, resp)

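    # Illustrative note (not part of the original module): the permissions
    # collected from the API descriptor collapse onto the URL component that
    # sendv2request() uses:
    #   {'pull'}          -> .../ro/...
    #   {'push'}          -> .../rw/...
    #   {'push', 'pull'}  -> 'pull' is dropped above -> .../rw/...
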
    def close(self):
        if self._closed:
            return

        self.sendcommands()

        self._closed = True

        if not self._responsef:
            return

        # TODO ^C here may not result in immediate program termination.

        try:
            self._responsef.result()
        finally:
            self._responseexecutor.shutdown(wait=True)
            self._responsef = None
            self._responseexecutor = None

            # If any of our futures are still in progress, mark them as
            # errored, otherwise a result() could wait indefinitely.
            for f in self._futures:
                if not f.done():
                    f.set_exception(error.ResponseError(
                        _('unfulfilled command response')))

            self._futures = None

    def _handleresponse(self, handler, resp):
        # Called in a thread to read the response.

        while handler.readdata(resp):
            pass

@interfaceutil.implementer(repository.ipeerv2)
class httpv2peer(object):
    def __init__(self, ui, repourl, apipath, opener, requestbuilder,
                 apidescriptor):
        self.ui = ui
        self.apidescriptor = apidescriptor

        if repourl.endswith('/'):
            repourl = repourl[:-1]

        self._url = repourl
        self._apipath = apipath
        self._apiurl = '%s/%s' % (repourl, apipath)
        self._opener = opener
        self._requestbuilder = requestbuilder

        self._redirect = wireprotov2peer.supportedredirects(ui, apidescriptor)

    # Start of ipeerconnection.

    def url(self):
        return self._url

    def local(self):
        return None

    def peer(self):
        return self

    def canpush(self):
        # TODO change once implemented.
        return False

    def close(self):
        self.ui.note(_('(sent %d HTTP requests and %d bytes; '
                       'received %d bytes in responses)\n') %
                     (self._opener.requestscount,
                      self._opener.sentbytescount,
                      self._opener.receivedbytescount))

    # End of ipeerconnection.

    # Start of ipeercapabilities.

    def capable(self, name):
        # The capabilities used internally historically map to capabilities
        # advertised from the "capabilities" wire protocol command. However,
        # version 2 of that command works differently.

        # Maps to commands that are available.
        if name in ('branchmap', 'getbundle', 'known', 'lookup', 'pushkey'):
            return True

        # Other concepts.
        if name in ('bundle2'):
            return True

        # Alias command-* to presence of command of that name.
        if name.startswith('command-'):
            return name[len('command-'):] in self.apidescriptor['commands']

        return False

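    # Illustrative note (not part of the original module): callers can probe
    # for a specific wire protocol command with the 'command-' prefix, e.g.
    #   peer.capable('command-heads')
    # which simply checks whether 'heads' appears in the API descriptor's
    # 'commands' map.
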
    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
-            _('cannot %s; client or remote repository does not support the %r '
-              'capability') % (purpose, name))
+            _('cannot %s; client or remote repository does not support the '
+              '\'%s\' capability') % (purpose, name))

    # End of ipeercapabilities.

    def _call(self, name, **args):
        with self.commandexecutor() as e:
            return e.callcommand(name, args).result()

    def commandexecutor(self):
        return httpv2executor(self.ui, self._opener, self._requestbuilder,
                              self._apiurl, self.apidescriptor, self._redirect)

# Registry of API service names to metadata about peers that handle it.
#
# The following keys are meaningful:
#
# init
#   Callable receiving (ui, repourl, servicepath, opener, requestbuilder,
#   apidescriptor) to create a peer.
#
# priority
#   Integer priority for the service. If we could choose from multiple
#   services, we choose the one with the highest priority.
API_PEERS = {
    wireprototypes.HTTP_WIREPROTO_V2: {
        'init': httpv2peer,
        'priority': 50,
    },
}

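# Illustrative sketch (not part of the original module): a hypothetical
# extension could teach makepeer() about another advertised API service by
# adding an entry of the same shape; the service name and peer class here are
# made up for the example.
#
#   API_PEERS[b'exp-custom-api-0001'] = {
#       'init': mycustompeer,   # same signature as httpv2peer.__init__
#       'priority': 10,         # lower than HTTP_WIREPROTO_V2's 50
#   }
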
def performhandshake(ui, url, opener, requestbuilder):
    # The handshake is a request to the capabilities command.

    caps = None
    def capable(x):
        raise error.ProgrammingError('should not be called')

    args = {}

    # The client advertises support for newer protocols by adding an
    # X-HgUpgrade-* header with a list of supported APIs and an
    # X-HgProto-* header advertising which serializing formats it supports.
    # We only support the HTTP version 2 transport and CBOR responses for
    # now.
    advertisev2 = ui.configbool('experimental', 'httppeer.advertise-v2')

    if advertisev2:
        args['headers'] = {
            r'X-HgProto-1': r'cbor',
        }

        args['headers'].update(
            encodevalueinheaders(' '.join(sorted(API_PEERS)),
                                 'X-HgUpgrade',
                                 # We don't know the header limit this early.
                                 # So make it small.
                                 1024))

    req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
                                           capable, url, 'capabilities',
                                           args)
    resp = sendrequest(ui, opener, req)

    # The server may redirect us to the repo root, stripping the
    # ?cmd=capabilities query string from the URL. The server would likely
    # return HTML in this case and ``parsev1commandresponse()`` would raise.
    # We catch this special case and re-issue the capabilities request against
    # the new URL.
    #
    # We should ideally not do this, as a redirect that drops the query
    # string from the URL is arguably a server bug. (Garbage in, garbage out).
    # However, Mercurial clients for several years appeared to handle this
    # issue without behavior degradation. And according to issue 5860, it may
    # be a longstanding bug in some server implementations. So we allow a
    # redirect that drops the query string to "just work."
    try:
        respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
                                                   compressible=False,
                                                   allowcbor=advertisev2)
    except RedirectedRepoError as e:
        req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
                                               capable, e.respurl,
                                               'capabilities', args)
        resp = sendrequest(ui, opener, req)
        respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
                                                   compressible=False,
                                                   allowcbor=advertisev2)

    try:
        rawdata = resp.read()
    finally:
        resp.close()

    if not ct.startswith('application/mercurial-'):
        raise error.ProgrammingError('unexpected content-type: %s' % ct)

    if advertisev2:
        if ct == 'application/mercurial-cbor':
            try:
                info = cborutil.decodeall(rawdata)[0]
            except cborutil.CBORDecodeError:
                raise error.Abort(_('error decoding CBOR from remote server'),
                                  hint=_('try again and consider contacting '
                                         'the server operator'))

        # We got a legacy response. That's fine.
        elif ct in ('application/mercurial-0.1', 'application/mercurial-0.2'):
            info = {
                'v1capabilities': set(rawdata.split())
            }

        else:
            raise error.RepoError(
                _('unexpected response type from server: %s') % ct)
    else:
        info = {
            'v1capabilities': set(rawdata.split())
        }

    return respurl, info

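# Illustrative note (not part of the original module): with
# experimental.httppeer.advertise-v2 enabled, the ?cmd=capabilities handshake
# request carries extra headers along the lines of
#   X-HgProto-1: cbor
#   X-HgUpgrade-1: <the API names in API_PEERS, i.e. HTTP_WIREPROTO_V2>
# and a CBOR-capable server answers with application/mercurial-cbor whose
# decoded map provides 'apibase', 'apis', and 'v1capabilities' for makepeer()
# below.
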
941 def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
941 def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
942 """Construct an appropriate HTTP peer instance.
942 """Construct an appropriate HTTP peer instance.
943
943
944 ``opener`` is an ``url.opener`` that should be used to establish
944 ``opener`` is an ``url.opener`` that should be used to establish
945 connections and perform HTTP requests.
945 connections and perform HTTP requests.
946
946
947 ``requestbuilder`` is the type used for constructing HTTP requests.
947 ``requestbuilder`` is the type used for constructing HTTP requests.
948 It exists as an argument so extensions can override the default.
948 It exists as an argument so extensions can override the default.
949 """
949 """
950 u = util.url(path)
950 u = util.url(path)
951 if u.query or u.fragment:
951 if u.query or u.fragment:
952 raise error.Abort(_('unsupported URL component: "%s"') %
952 raise error.Abort(_('unsupported URL component: "%s"') %
953 (u.query or u.fragment))
953 (u.query or u.fragment))
954
954
955 # urllib cannot handle URLs with embedded user or passwd.
955 # urllib cannot handle URLs with embedded user or passwd.
956 url, authinfo = u.authinfo()
956 url, authinfo = u.authinfo()
957 ui.debug('using %s\n' % url)
957 ui.debug('using %s\n' % url)
958
958
959 opener = opener or urlmod.opener(ui, authinfo)
959 opener = opener or urlmod.opener(ui, authinfo)
960
960
961 respurl, info = performhandshake(ui, url, opener, requestbuilder)
961 respurl, info = performhandshake(ui, url, opener, requestbuilder)
962
962
963 # Given the intersection of APIs that both we and the server support,
963 # Given the intersection of APIs that both we and the server support,
964 # sort by their advertised priority and pick the first one.
964 # sort by their advertised priority and pick the first one.
965 #
965 #
966 # TODO consider making this request-based and interface driven. For
966 # TODO consider making this request-based and interface driven. For
967 # example, the caller could say "I want a peer that does X." It's quite
967 # example, the caller could say "I want a peer that does X." It's quite
968 # possible that not all peers would do that. Since we know the service
968 # possible that not all peers would do that. Since we know the service
969 # capabilities, we could filter out services not meeting the
969 # capabilities, we could filter out services not meeting the
970 # requirements. Possibly by consulting the interfaces defined by the
970 # requirements. Possibly by consulting the interfaces defined by the
971 # peer type.
971 # peer type.
972 apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys())
972 apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys())
973
973
974 preferredchoices = sorted(apipeerchoices,
974 preferredchoices = sorted(apipeerchoices,
975 key=lambda x: API_PEERS[x]['priority'],
975 key=lambda x: API_PEERS[x]['priority'],
976 reverse=True)
976 reverse=True)
977
977
978 for service in preferredchoices:
978 for service in preferredchoices:
979 apipath = '%s/%s' % (info['apibase'].rstrip('/'), service)
979 apipath = '%s/%s' % (info['apibase'].rstrip('/'), service)
980
980
981 return API_PEERS[service]['init'](ui, respurl, apipath, opener,
981 return API_PEERS[service]['init'](ui, respurl, apipath, opener,
982 requestbuilder,
982 requestbuilder,
983 info['apis'][service])
983 info['apis'][service])
984
984
985 # Failed to construct an API peer. Fall back to legacy.
985 # Failed to construct an API peer. Fall back to legacy.
986 return httppeer(ui, path, respurl, opener, requestbuilder,
986 return httppeer(ui, path, respurl, opener, requestbuilder,
987 info['v1capabilities'])
987 info['v1capabilities'])
988
988
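# Illustrative sketch of the selection rule makepeer() applies above: among
# the services both sides support, the one with the highest advertised
# priority wins. ``pickservice`` and the toy registry are hypothetical
# stand-ins for the real API_PEERS table, not code from this module.
def pickservice(supported, registry):
    # ``supported``: iterable of service names offered by the server.
    # ``registry``: dict mapping service name -> {'priority': int, ...}.
    choices = set(supported) & set(registry)
    if not choices:
        return None
    return max(choices, key=lambda name: registry[name]['priority'])

# e.g. pickservice(['serviceA', 'serviceB'],
#                  {'serviceA': {'priority': 10},
#                   'serviceB': {'priority': 50}}) -> 'serviceB'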
989 def instance(ui, path, create, intents=None, createopts=None):
989 def instance(ui, path, create, intents=None, createopts=None):
990 if create:
990 if create:
991 raise error.Abort(_('cannot create new http repository'))
991 raise error.Abort(_('cannot create new http repository'))
992 try:
992 try:
993 if path.startswith('https:') and not urlmod.has_https:
993 if path.startswith('https:') and not urlmod.has_https:
994 raise error.Abort(_('Python support for SSL and HTTPS '
994 raise error.Abort(_('Python support for SSL and HTTPS '
995 'is not installed'))
995 'is not installed'))
996
996
997 inst = makepeer(ui, path)
997 inst = makepeer(ui, path)
998
998
999 return inst
999 return inst
1000 except error.RepoError as httpexception:
1000 except error.RepoError as httpexception:
1001 try:
1001 try:
1002 r = statichttprepo.instance(ui, "static-" + path, create)
1002 r = statichttprepo.instance(ui, "static-" + path, create)
1003 ui.note(_('(falling back to static-http)\n'))
1003 ui.note(_('(falling back to static-http)\n'))
1004 return r
1004 return r
1005 except error.RepoError:
1005 except error.RepoError:
1006 raise httpexception # use the original http RepoError instead
1006 raise httpexception # use the original http RepoError instead
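# Illustrative usage sketch (hypothetical helper, not part of this module):
# obtain an HTTP peer through instance() above, query its capabilities, and
# release the connection. Only methods documented on the peer interfaces
# (capabilities(), close()) are used.
def _demohttppeercaps(ui, path):
    peer = instance(ui, path, create=False)
    try:
        return peer.capabilities()
    finally:
        peer.close()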
@@ -1,1864 +1,1864 @@
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 error,
12 error,
13 )
13 )
14 from .utils import (
14 from .utils import (
15 interfaceutil,
15 interfaceutil,
16 )
16 )
17
17
18 # When narrowing is finalized and no longer subject to format changes,
18 # When narrowing is finalized and no longer subject to format changes,
19 # we should move this to just "narrow" or similar.
19 # we should move this to just "narrow" or similar.
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21
21
22 # Local repository feature string.
22 # Local repository feature string.
23
23
24 # Revlogs are being used for file storage.
24 # Revlogs are being used for file storage.
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
26 # The storage part of the repository is shared from an external source.
26 # The storage part of the repository is shared from an external source.
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
28 # LFS supported for backing file storage.
28 # LFS supported for backing file storage.
29 REPO_FEATURE_LFS = b'lfs'
29 REPO_FEATURE_LFS = b'lfs'
30 # Repository supports being stream cloned.
30 # Repository supports being stream cloned.
31 REPO_FEATURE_STREAM_CLONE = b'streamclone'
31 REPO_FEATURE_STREAM_CLONE = b'streamclone'
32 # Files storage may lack data for all ancestors.
32 # Files storage may lack data for all ancestors.
33 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
33 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
34
34
35 REVISION_FLAG_CENSORED = 1 << 15
35 REVISION_FLAG_CENSORED = 1 << 15
36 REVISION_FLAG_ELLIPSIS = 1 << 14
36 REVISION_FLAG_ELLIPSIS = 1 << 14
37 REVISION_FLAG_EXTSTORED = 1 << 13
37 REVISION_FLAG_EXTSTORED = 1 << 13
38
38
39 REVISION_FLAGS_KNOWN = (
39 REVISION_FLAGS_KNOWN = (
40 REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
40 REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
41
41
42 CG_DELTAMODE_STD = b'default'
42 CG_DELTAMODE_STD = b'default'
43 CG_DELTAMODE_PREV = b'previous'
43 CG_DELTAMODE_PREV = b'previous'
44 CG_DELTAMODE_FULL = b'fulltext'
44 CG_DELTAMODE_FULL = b'fulltext'
45 CG_DELTAMODE_P1 = b'p1'
45 CG_DELTAMODE_P1 = b'p1'
46
46
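# Illustrative sketch (hypothetical helper, not part of this module): the
# REVISION_FLAG_* values above are individual bits, so membership tests and
# validation are plain bitwise operations.
def describerevisionflags(flags):
    # Reject any bit this version does not know about.
    if flags & ~REVISION_FLAGS_KNOWN:
        raise ValueError('unknown revision flag bits: %d'
                         % (flags & ~REVISION_FLAGS_KNOWN))
    names = []
    if flags & REVISION_FLAG_CENSORED:
        names.append('censored')
    if flags & REVISION_FLAG_ELLIPSIS:
        names.append('ellipsis')
    if flags & REVISION_FLAG_EXTSTORED:
        names.append('extstored')
    return names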
47 class ipeerconnection(interfaceutil.Interface):
47 class ipeerconnection(interfaceutil.Interface):
48 """Represents a "connection" to a repository.
48 """Represents a "connection" to a repository.
49
49
50 This is the base interface for representing a connection to a repository.
50 This is the base interface for representing a connection to a repository.
51 It holds basic properties and methods applicable to all peer types.
51 It holds basic properties and methods applicable to all peer types.
52
52
53 This is not a complete interface definition and should not be used
53 This is not a complete interface definition and should not be used
54 outside of this module.
54 outside of this module.
55 """
55 """
56 ui = interfaceutil.Attribute("""ui.ui instance""")
56 ui = interfaceutil.Attribute("""ui.ui instance""")
57
57
58 def url():
58 def url():
59 """Returns a URL string representing this peer.
59 """Returns a URL string representing this peer.
60
60
61 Currently, implementations expose the raw URL used to construct the
61 Currently, implementations expose the raw URL used to construct the
62 instance. It may contain credentials as part of the URL. The
62 instance. It may contain credentials as part of the URL. The
63 expectations of the value aren't well-defined and this could lead to
63 expectations of the value aren't well-defined and this could lead to
64 data leakage.
64 data leakage.
65
65
66 TODO audit/clean consumers and more clearly define the contents of this
66 TODO audit/clean consumers and more clearly define the contents of this
67 value.
67 value.
68 """
68 """
69
69
70 def local():
70 def local():
71 """Returns a local repository instance.
71 """Returns a local repository instance.
72
72
73 If the peer represents a local repository, returns an object that
73 If the peer represents a local repository, returns an object that
74 can be used to interface with it. Otherwise returns ``None``.
74 can be used to interface with it. Otherwise returns ``None``.
75 """
75 """
76
76
77 def peer():
77 def peer():
78 """Returns an object conforming to this interface.
78 """Returns an object conforming to this interface.
79
79
80 Most implementations will ``return self``.
80 Most implementations will ``return self``.
81 """
81 """
82
82
83 def canpush():
83 def canpush():
84 """Returns a boolean indicating if this peer can be pushed to."""
84 """Returns a boolean indicating if this peer can be pushed to."""
85
85
86 def close():
86 def close():
87 """Close the connection to this peer.
87 """Close the connection to this peer.
88
88
89 This is called when the peer will no longer be used. Resources
89 This is called when the peer will no longer be used. Resources
90 associated with the peer should be cleaned up.
90 associated with the peer should be cleaned up.
91 """
91 """
92
92
93 class ipeercapabilities(interfaceutil.Interface):
93 class ipeercapabilities(interfaceutil.Interface):
94 """Peer sub-interface related to capabilities."""
94 """Peer sub-interface related to capabilities."""
95
95
96 def capable(name):
96 def capable(name):
97 """Determine support for a named capability.
97 """Determine support for a named capability.
98
98
99 Returns ``False`` if capability not supported.
99 Returns ``False`` if capability not supported.
100
100
101 Returns ``True`` if boolean capability is supported. Returns a string
101 Returns ``True`` if boolean capability is supported. Returns a string
102 if capability support is non-boolean.
102 if capability support is non-boolean.
103
103
104 Capability strings may or may not map to wire protocol capabilities.
104 Capability strings may or may not map to wire protocol capabilities.
105 """
105 """
106
106
107 def requirecap(name, purpose):
107 def requirecap(name, purpose):
108 """Require a capability to be present.
108 """Require a capability to be present.
109
109
110 Raises a ``CapabilityError`` if the capability isn't present.
110 Raises a ``CapabilityError`` if the capability isn't present.
111 """
111 """
112
112
113 class ipeercommands(interfaceutil.Interface):
113 class ipeercommands(interfaceutil.Interface):
114 """Client-side interface for communicating over the wire protocol.
114 """Client-side interface for communicating over the wire protocol.
115
115
116 This interface is used as a gateway to the Mercurial wire protocol.
116 This interface is used as a gateway to the Mercurial wire protocol.
117 Methods commonly call wire protocol commands of the same name.
117 Methods commonly call wire protocol commands of the same name.
118 """
118 """
119
119
120 def branchmap():
120 def branchmap():
121 """Obtain heads in named branches.
121 """Obtain heads in named branches.
122
122
123 Returns a dict mapping branch name to an iterable of nodes that are
123 Returns a dict mapping branch name to an iterable of nodes that are
124 heads on that branch.
124 heads on that branch.
125 """
125 """
126
126
127 def capabilities():
127 def capabilities():
128 """Obtain capabilities of the peer.
128 """Obtain capabilities of the peer.
129
129
130 Returns a set of string capabilities.
130 Returns a set of string capabilities.
131 """
131 """
132
132
133 def clonebundles():
133 def clonebundles():
134 """Obtains the clone bundles manifest for the repo.
134 """Obtains the clone bundles manifest for the repo.
135
135
136 Returns the manifest as unparsed bytes.
136 Returns the manifest as unparsed bytes.
137 """
137 """
138
138
139 def debugwireargs(one, two, three=None, four=None, five=None):
139 def debugwireargs(one, two, three=None, four=None, five=None):
140 """Used to facilitate debugging of arguments passed over the wire."""
140 """Used to facilitate debugging of arguments passed over the wire."""
141
141
142 def getbundle(source, **kwargs):
142 def getbundle(source, **kwargs):
143 """Obtain remote repository data as a bundle.
143 """Obtain remote repository data as a bundle.
144
144
145 This command is how the bulk of repository data is transferred from
145 This command is how the bulk of repository data is transferred from
146 the peer to the local repository.
146 the peer to the local repository.
147
147
148 Returns a generator of bundle data.
148 Returns a generator of bundle data.
149 """
149 """
150
150
151 def heads():
151 def heads():
152 """Determine all known head revisions in the peer.
152 """Determine all known head revisions in the peer.
153
153
154 Returns an iterable of binary nodes.
154 Returns an iterable of binary nodes.
155 """
155 """
156
156
157 def known(nodes):
157 def known(nodes):
158 """Determine whether multiple nodes are known.
158 """Determine whether multiple nodes are known.
159
159
160 Accepts an iterable of nodes whose presence to check for.
160 Accepts an iterable of nodes whose presence to check for.
161
161
162 Returns an iterable of booleans indicating whether the corresponding node
162 Returns an iterable of booleans indicating whether the corresponding node
163 at that index is known to the peer.
163 at that index is known to the peer.
164 """
164 """
165
165
166 def listkeys(namespace):
166 def listkeys(namespace):
167 """Obtain all keys in a pushkey namespace.
167 """Obtain all keys in a pushkey namespace.
168
168
169 Returns an iterable of key names.
169 Returns an iterable of key names.
170 """
170 """
171
171
172 def lookup(key):
172 def lookup(key):
173 """Resolve a value to a known revision.
173 """Resolve a value to a known revision.
174
174
175 Returns a binary node of the resolved revision on success.
175 Returns a binary node of the resolved revision on success.
176 """
176 """
177
177
178 def pushkey(namespace, key, old, new):
178 def pushkey(namespace, key, old, new):
179 """Set a value using the ``pushkey`` protocol.
179 """Set a value using the ``pushkey`` protocol.
180
180
181 Arguments correspond to the pushkey namespace and key to operate on and
181 Arguments correspond to the pushkey namespace and key to operate on and
182 the old and new values for that key.
182 the old and new values for that key.
183
183
184 Returns a string with the peer result. The value inside varies by the
184 Returns a string with the peer result. The value inside varies by the
185 namespace.
185 namespace.
186 """
186 """
187
187
188 def stream_out():
188 def stream_out():
189 """Obtain streaming clone data.
189 """Obtain streaming clone data.
190
190
191 Successful result should be a generator of data chunks.
191 Successful result should be a generator of data chunks.
192 """
192 """
193
193
194 def unbundle(bundle, heads, url):
194 def unbundle(bundle, heads, url):
195 """Transfer repository data to the peer.
195 """Transfer repository data to the peer.
196
196
197 This is how the bulk of data during a push is transferred.
197 This is how the bulk of data during a push is transferred.
198
198
199 Returns the integer number of heads added to the peer.
199 Returns the integer number of heads added to the peer.
200 """
200 """
201
201
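# Illustrative sketch (hypothetical helper, not part of this module): the
# ipeercommands methods map onto wire protocol commands of the same name, so
# a few of them used together against any conforming peer look like this.
def summarizeremote(peer):
    heads = list(peer.heads())          # binary nodes of remote heads
    known = list(peer.known(heads))     # one boolean per queried node
    bookmarks = peer.listkeys('bookmarks')
    return len(heads), sum(1 for k in known if k), len(bookmarks)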
202 class ipeerlegacycommands(interfaceutil.Interface):
202 class ipeerlegacycommands(interfaceutil.Interface):
203 """Interface for implementing support for legacy wire protocol commands.
203 """Interface for implementing support for legacy wire protocol commands.
204
204
205 Wire protocol commands transition to legacy status when they are no longer
205 Wire protocol commands transition to legacy status when they are no longer
206 used by modern clients. To facilitate identifying which commands are
206 used by modern clients. To facilitate identifying which commands are
207 legacy, the interfaces are split.
207 legacy, the interfaces are split.
208 """
208 """
209
209
210 def between(pairs):
210 def between(pairs):
211 """Obtain nodes between pairs of nodes.
211 """Obtain nodes between pairs of nodes.
212
212
213 ``pairs`` is an iterable of node pairs.
213 ``pairs`` is an iterable of node pairs.
214
214
215 Returns an iterable of iterables of nodes corresponding to each
215 Returns an iterable of iterables of nodes corresponding to each
216 requested pair.
216 requested pair.
217 """
217 """
218
218
219 def branches(nodes):
219 def branches(nodes):
220 """Obtain ancestor changesets of specific nodes back to a branch point.
220 """Obtain ancestor changesets of specific nodes back to a branch point.
221
221
222 For each requested node, the peer finds the first ancestor node that is
222 For each requested node, the peer finds the first ancestor node that is
223 a DAG root or is a merge.
223 a DAG root or is a merge.
224
224
225 Returns an iterable of iterables with the resolved values for each node.
225 Returns an iterable of iterables with the resolved values for each node.
226 """
226 """
227
227
228 def changegroup(nodes, source):
228 def changegroup(nodes, source):
229 """Obtain a changegroup with data for descendants of specified nodes."""
229 """Obtain a changegroup with data for descendants of specified nodes."""
230
230
231 def changegroupsubset(bases, heads, source):
231 def changegroupsubset(bases, heads, source):
232 pass
232 pass
233
233
234 class ipeercommandexecutor(interfaceutil.Interface):
234 class ipeercommandexecutor(interfaceutil.Interface):
235 """Represents a mechanism to execute remote commands.
235 """Represents a mechanism to execute remote commands.
236
236
237 This is the primary interface for requesting that wire protocol commands
237 This is the primary interface for requesting that wire protocol commands
238 be executed. Instances of this interface are active in a context manager
238 be executed. Instances of this interface are active in a context manager
239 and have a well-defined lifetime. When the context manager exits, all
239 and have a well-defined lifetime. When the context manager exits, all
240 outstanding requests are waited on.
240 outstanding requests are waited on.
241 """
241 """
242
242
243 def callcommand(name, args):
243 def callcommand(name, args):
244 """Request that a named command be executed.
244 """Request that a named command be executed.
245
245
246 Receives the command name and a dictionary of command arguments.
246 Receives the command name and a dictionary of command arguments.
247
247
248 Returns a ``concurrent.futures.Future`` that will resolve to the
248 Returns a ``concurrent.futures.Future`` that will resolve to the
249 result of that command request. That exact value is left up to
249 result of that command request. That exact value is left up to
250 the implementation and possibly varies by command.
250 the implementation and possibly varies by command.
251
251
252 Not all commands can coexist with other commands in an executor
252 Not all commands can coexist with other commands in an executor
253 instance: it depends on the underlying wire protocol transport being
253 instance: it depends on the underlying wire protocol transport being
254 used and the command itself.
254 used and the command itself.
255
255
256 Implementations MAY call ``sendcommands()`` automatically if the
256 Implementations MAY call ``sendcommands()`` automatically if the
257 requested command can not coexist with other commands in this executor.
257 requested command can not coexist with other commands in this executor.
258
258
259 Implementations MAY call ``sendcommands()`` automatically when the
259 Implementations MAY call ``sendcommands()`` automatically when the
260 future's ``result()`` is called. So, consumers using multiple
260 future's ``result()`` is called. So, consumers using multiple
261 commands with an executor MUST ensure that ``result()`` is not called
261 commands with an executor MUST ensure that ``result()`` is not called
262 until all command requests have been issued.
262 until all command requests have been issued.
263 """
263 """
264
264
265 def sendcommands():
265 def sendcommands():
266 """Trigger submission of queued command requests.
266 """Trigger submission of queued command requests.
267
267
268 Not all transports submit commands as soon as they are requested to
268 Not all transports submit commands as soon as they are requested to
269 run. When called, this method forces queued command requests to be
269 run. When called, this method forces queued command requests to be
270 issued. It will no-op if all commands have already been sent.
270 issued. It will no-op if all commands have already been sent.
271
271
272 When called, no more new commands may be issued with this executor.
272 When called, no more new commands may be issued with this executor.
273 """
273 """
274
274
275 def close():
275 def close():
276 """Signal that this command request is finished.
276 """Signal that this command request is finished.
277
277
278 When called, no more new commands may be issued. All outstanding
278 When called, no more new commands may be issued. All outstanding
279 commands that have previously been issued are waited on before
279 commands that have previously been issued are waited on before
280 returning. This not only includes waiting for the futures to resolve,
280 returning. This not only includes waiting for the futures to resolve,
281 but also waiting for all response data to arrive. In other words,
281 but also waiting for all response data to arrive. In other words,
282 calling this waits for all on-wire state for issued command requests
282 calling this waits for all on-wire state for issued command requests
283 to finish.
283 to finish.
284
284
285 When used as a context manager, this method is called when exiting the
285 When used as a context manager, this method is called when exiting the
286 context manager.
286 context manager.
287
287
288 This method may call ``sendcommands()`` if there are buffered commands.
288 This method may call ``sendcommands()`` if there are buffered commands.
289 """
289 """
290
290
291 class ipeerrequests(interfaceutil.Interface):
291 class ipeerrequests(interfaceutil.Interface):
292 """Interface for executing commands on a peer."""
292 """Interface for executing commands on a peer."""
293
293
294 def commandexecutor():
294 def commandexecutor():
295 """A context manager that resolves to an ipeercommandexecutor.
295 """A context manager that resolves to an ipeercommandexecutor.
296
296
297 The object this resolves to can be used to issue command requests
297 The object this resolves to can be used to issue command requests
298 to the peer.
298 to the peer.
299
299
300 Callers should call its ``callcommand`` method to issue command
300 Callers should call its ``callcommand`` method to issue command
301 requests.
301 requests.
302
302
303 A new executor should be obtained for each distinct set of commands
303 A new executor should be obtained for each distinct set of commands
304 (possibly just a single command) that the consumer wants to execute
304 (possibly just a single command) that the consumer wants to execute
305 as part of a single operation or round trip. This is because some
305 as part of a single operation or round trip. This is because some
306 peers are half-duplex and/or don't support persistent connections.
306 peers are half-duplex and/or don't support persistent connections.
307 e.g. in the case of HTTP peers, commands sent to an executor represent
307 e.g. in the case of HTTP peers, commands sent to an executor represent
308 a single HTTP request. While some peers may support multiple command
308 a single HTTP request. While some peers may support multiple command
309 sends over the wire per executor, consumers need to code to the least
309 sends over the wire per executor, consumers need to code to the least
310 capable peer. So it should be assumed that command executors buffer
310 capable peer. So it should be assumed that command executors buffer
311 called commands until they are told to send them and that each
311 called commands until they are told to send them and that each
312 command executor could result in a new connection or wire-level request
312 command executor could result in a new connection or wire-level request
313 being issued.
313 being issued.
314 """
314 """
315
315
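# Illustrative usage sketch (not code from this changeset): the executor
# pattern described above, run against any peer implementing ipeerrequests.
# Both commands are queued before either result() is called, which keeps the
# sketch correct even for transports that auto-send on result(). Command
# names and arguments follow the wire protocol commands documented earlier.
def fetchbasicinfo(peer):
    with peer.commandexecutor() as e:
        fheads = e.callcommand(b'heads', {})
        fbookmarks = e.callcommand(b'listkeys', {b'namespace': b'bookmarks'})
        # Resolve only after every request has been issued.
        heads = fheads.result()
        bookmarks = fbookmarks.result()
    return heads, bookmarks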
316 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
316 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
317 """Unified interface for peer repositories.
317 """Unified interface for peer repositories.
318
318
319 All peer instances must conform to this interface.
319 All peer instances must conform to this interface.
320 """
320 """
321
321
322 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
322 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
323 """Unified peer interface for wire protocol version 2 peers."""
323 """Unified peer interface for wire protocol version 2 peers."""
324
324
325 apidescriptor = interfaceutil.Attribute(
325 apidescriptor = interfaceutil.Attribute(
326 """Data structure holding description of server API.""")
326 """Data structure holding description of server API.""")
327
327
328 @interfaceutil.implementer(ipeerbase)
328 @interfaceutil.implementer(ipeerbase)
329 class peer(object):
329 class peer(object):
330 """Base class for peer repositories."""
330 """Base class for peer repositories."""
331
331
332 def capable(self, name):
332 def capable(self, name):
333 caps = self.capabilities()
333 caps = self.capabilities()
334 if name in caps:
334 if name in caps:
335 return True
335 return True
336
336
337 name = '%s=' % name
337 name = '%s=' % name
338 for cap in caps:
338 for cap in caps:
339 if cap.startswith(name):
339 if cap.startswith(name):
340 return cap[len(name):]
340 return cap[len(name):]
341
341
342 return False
342 return False
343
343
344 def requirecap(self, name, purpose):
344 def requirecap(self, name, purpose):
345 if self.capable(name):
345 if self.capable(name):
346 return
346 return
347
347
348 raise error.CapabilityError(
348 raise error.CapabilityError(
349 _('cannot %s; remote repository does not support the %r '
349 _('cannot %s; remote repository does not support the '
350 'capability') % (purpose, name))
350 '\'%s\' capability') % (purpose, name))
351
351
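# Illustrative sketch (hypothetical helper, not part of this module) of the
# capable() contract implemented above: True for a boolean capability, the
# text after ``name=`` for a valued capability, False otherwise.
def describecapability(peer, name):
    value = peer.capable(name)
    if value is True:
        return '%s: supported' % name
    if value is False:
        return '%s: not supported' % name
    return '%s: supported, value %s' % (name, value)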
352 class iverifyproblem(interfaceutil.Interface):
352 class iverifyproblem(interfaceutil.Interface):
353 """Represents a problem with the integrity of the repository.
353 """Represents a problem with the integrity of the repository.
354
354
355 Instances of this interface are emitted to describe an integrity issue
355 Instances of this interface are emitted to describe an integrity issue
356 with a repository (e.g. corrupt storage, missing data, etc).
356 with a repository (e.g. corrupt storage, missing data, etc).
357
357
358 Instances are essentially messages associated with severity.
358 Instances are essentially messages associated with severity.
359 """
359 """
360 warning = interfaceutil.Attribute(
360 warning = interfaceutil.Attribute(
361 """Message indicating a non-fatal problem.""")
361 """Message indicating a non-fatal problem.""")
362
362
363 error = interfaceutil.Attribute(
363 error = interfaceutil.Attribute(
364 """Message indicating a fatal problem.""")
364 """Message indicating a fatal problem.""")
365
365
366 node = interfaceutil.Attribute(
366 node = interfaceutil.Attribute(
367 """Revision encountering the problem.
367 """Revision encountering the problem.
368
368
369 ``None`` means the problem doesn't apply to a single revision.
369 ``None`` means the problem doesn't apply to a single revision.
370 """)
370 """)
371
371
372 class irevisiondelta(interfaceutil.Interface):
372 class irevisiondelta(interfaceutil.Interface):
373 """Represents a delta between one revision and another.
373 """Represents a delta between one revision and another.
374
374
375 Instances convey enough information to allow a revision to be exchanged
375 Instances convey enough information to allow a revision to be exchanged
376 with another repository.
376 with another repository.
377
377
378 Instances represent the fulltext revision data or a delta against
378 Instances represent the fulltext revision data or a delta against
379 another revision. Therefore the ``revision`` and ``delta`` attributes
379 another revision. Therefore the ``revision`` and ``delta`` attributes
380 are mutually exclusive.
380 are mutually exclusive.
381
381
382 Typically used for changegroup generation.
382 Typically used for changegroup generation.
383 """
383 """
384
384
385 node = interfaceutil.Attribute(
385 node = interfaceutil.Attribute(
386 """20 byte node of this revision.""")
386 """20 byte node of this revision.""")
387
387
388 p1node = interfaceutil.Attribute(
388 p1node = interfaceutil.Attribute(
389 """20 byte node of 1st parent of this revision.""")
389 """20 byte node of 1st parent of this revision.""")
390
390
391 p2node = interfaceutil.Attribute(
391 p2node = interfaceutil.Attribute(
392 """20 byte node of 2nd parent of this revision.""")
392 """20 byte node of 2nd parent of this revision.""")
393
393
394 linknode = interfaceutil.Attribute(
394 linknode = interfaceutil.Attribute(
395 """20 byte node of the changelog revision this node is linked to.""")
395 """20 byte node of the changelog revision this node is linked to.""")
396
396
397 flags = interfaceutil.Attribute(
397 flags = interfaceutil.Attribute(
398 """2 bytes of integer flags that apply to this revision.
398 """2 bytes of integer flags that apply to this revision.
399
399
400 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
400 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
401 """)
401 """)
402
402
403 basenode = interfaceutil.Attribute(
403 basenode = interfaceutil.Attribute(
404 """20 byte node of the revision this data is a delta against.
404 """20 byte node of the revision this data is a delta against.
405
405
406 ``nullid`` indicates that the revision is a full revision and not
406 ``nullid`` indicates that the revision is a full revision and not
407 a delta.
407 a delta.
408 """)
408 """)
409
409
410 baserevisionsize = interfaceutil.Attribute(
410 baserevisionsize = interfaceutil.Attribute(
411 """Size of base revision this delta is against.
411 """Size of base revision this delta is against.
412
412
413 May be ``None`` if ``basenode`` is ``nullid``.
413 May be ``None`` if ``basenode`` is ``nullid``.
414 """)
414 """)
415
415
416 revision = interfaceutil.Attribute(
416 revision = interfaceutil.Attribute(
417 """Raw fulltext of revision data for this node.""")
417 """Raw fulltext of revision data for this node.""")
418
418
419 delta = interfaceutil.Attribute(
419 delta = interfaceutil.Attribute(
420 """Delta between ``basenode`` and ``node``.
420 """Delta between ``basenode`` and ``node``.
421
421
422 Stored in the bdiff delta format.
422 Stored in the bdiff delta format.
423 """)
423 """)
424
424
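# Illustrative sketch (hypothetical helper, not part of this module): per the
# irevisiondelta contract above, a ``basenode`` equal to the null node means
# the object carries a fulltext in ``revision``; otherwise ``delta`` holds a
# bdiff delta against ``basenode``. ``nullid`` would come from mercurial.node
# in real code.
def revisionpayload(rev, nullid):
    if rev.basenode == nullid:
        return 'fulltext', rev.revision
    return 'delta', rev.delta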
425 class ifilerevisionssequence(interfaceutil.Interface):
425 class ifilerevisionssequence(interfaceutil.Interface):
426 """Contains index data for all revisions of a file.
426 """Contains index data for all revisions of a file.
427
427
428 Types implementing this behave like lists of tuples. The index
428 Types implementing this behave like lists of tuples. The index
429 in the list corresponds to the revision number. The values contain
429 in the list corresponds to the revision number. The values contain
430 index metadata.
430 index metadata.
431
431
432 The *null* revision (revision number -1) is always the last item
432 The *null* revision (revision number -1) is always the last item
433 in the index.
433 in the index.
434 """
434 """
435
435
436 def __len__():
436 def __len__():
437 """The total number of revisions."""
437 """The total number of revisions."""
438
438
439 def __getitem__(rev):
439 def __getitem__(rev):
440 """Returns the object having a specific revision number.
440 """Returns the object having a specific revision number.
441
441
442 Returns an 8-tuple with the following fields:
442 Returns an 8-tuple with the following fields:
443
443
444 offset+flags
444 offset+flags
445 Contains the offset and flags for the revision. 64-bit unsigned
445 Contains the offset and flags for the revision. 64-bit unsigned
446 integer where the first 6 bytes are the offset and the next 2 bytes
446 integer where the first 6 bytes are the offset and the next 2 bytes
447 are flags. The offset can be 0 if it is not used by the store.
447 are flags. The offset can be 0 if it is not used by the store.
448 compressed size
448 compressed size
449 Size of the revision data in the store. It can be 0 if it isn't
449 Size of the revision data in the store. It can be 0 if it isn't
450 needed by the store.
450 needed by the store.
451 uncompressed size
451 uncompressed size
452 Fulltext size. It can be 0 if it isn't needed by the store.
452 Fulltext size. It can be 0 if it isn't needed by the store.
453 base revision
453 base revision
454 Revision number of revision the delta for storage is encoded
454 Revision number of revision the delta for storage is encoded
455 against. -1 indicates not encoded against a base revision.
455 against. -1 indicates not encoded against a base revision.
456 link revision
456 link revision
457 Revision number of changelog revision this entry is related to.
457 Revision number of changelog revision this entry is related to.
458 p1 revision
458 p1 revision
459 Revision number of 1st parent. -1 if no 1st parent.
459 Revision number of 1st parent. -1 if no 1st parent.
460 p2 revision
460 p2 revision
461 Revision number of 2nd parent. -1 if no 2nd parent.
461 Revision number of 2nd parent. -1 if no 2nd parent.
462 node
462 node
463 Binary node value for this revision number.
463 Binary node value for this revision number.
464
464
465 Negative values should index off the end of the sequence. ``-1``
465 Negative values should index off the end of the sequence. ``-1``
466 should return the null revision. ``-2`` should return the most
466 should return the null revision. ``-2`` should return the most
467 recent revision.
467 recent revision.
468 """
468 """
469
469
470 def __contains__(rev):
470 def __contains__(rev):
471 """Whether a revision number exists."""
471 """Whether a revision number exists."""
472
472
473 def insert(self, i, entry):
473 def insert(self, i, entry):
474 """Add an item to the index at specific revision."""
474 """Add an item to the index at specific revision."""
475
475
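# Illustrative sketch (hypothetical helper, not part of this module):
# unpacking one index entry exactly as documented for __getitem__() above.
def describeindexentry(index, rev):
    (offsetflags, compsize, rawsize, baserev, linkrev,
     p1rev, p2rev, node) = index[rev]
    offset = offsetflags >> 16      # upper 6 bytes
    flags = offsetflags & 0xffff    # lower 2 bytes
    return {
        'offset': offset,
        'flags': flags,
        'compressedsize': compsize,
        'fulltextsize': rawsize,
        'deltabase': baserev,
        'linkrev': linkrev,
        'parents': (p1rev, p2rev),
        'node': node,
    }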
476 class ifileindex(interfaceutil.Interface):
476 class ifileindex(interfaceutil.Interface):
477 """Storage interface for index data of a single file.
477 """Storage interface for index data of a single file.
478
478
479 File storage data is divided into index metadata and data storage.
479 File storage data is divided into index metadata and data storage.
480 This interface defines the index portion of the interface.
480 This interface defines the index portion of the interface.
481
481
482 The index logically consists of:
482 The index logically consists of:
483
483
484 * A mapping between revision numbers and nodes.
484 * A mapping between revision numbers and nodes.
485 * DAG data (storing and querying the relationship between nodes).
485 * DAG data (storing and querying the relationship between nodes).
486 * Metadata to facilitate storage.
486 * Metadata to facilitate storage.
487 """
487 """
488 def __len__():
488 def __len__():
489 """Obtain the number of revisions stored for this file."""
489 """Obtain the number of revisions stored for this file."""
490
490
491 def __iter__():
491 def __iter__():
492 """Iterate over revision numbers for this file."""
492 """Iterate over revision numbers for this file."""
493
493
494 def hasnode(node):
494 def hasnode(node):
495 """Returns a bool indicating if a node is known to this store.
495 """Returns a bool indicating if a node is known to this store.
496
496
497 Implementations must only return True for full, binary node values:
497 Implementations must only return True for full, binary node values:
498 hex nodes, revision numbers, and partial node matches must be
498 hex nodes, revision numbers, and partial node matches must be
499 rejected.
499 rejected.
500
500
501 The null node is never present.
501 The null node is never present.
502 """
502 """
503
503
504 def revs(start=0, stop=None):
504 def revs(start=0, stop=None):
505 """Iterate over revision numbers for this file, with control."""
505 """Iterate over revision numbers for this file, with control."""
506
506
507 def parents(node):
507 def parents(node):
508 """Returns a 2-tuple of parent nodes for a revision.
508 """Returns a 2-tuple of parent nodes for a revision.
509
509
510 Values will be ``nullid`` if the parent is empty.
510 Values will be ``nullid`` if the parent is empty.
511 """
511 """
512
512
513 def parentrevs(rev):
513 def parentrevs(rev):
514 """Like parents() but operates on revision numbers."""
514 """Like parents() but operates on revision numbers."""
515
515
516 def rev(node):
516 def rev(node):
517 """Obtain the revision number given a node.
517 """Obtain the revision number given a node.
518
518
519 Raises ``error.LookupError`` if the node is not known.
519 Raises ``error.LookupError`` if the node is not known.
520 """
520 """
521
521
522 def node(rev):
522 def node(rev):
523 """Obtain the node value given a revision number.
523 """Obtain the node value given a revision number.
524
524
525 Raises ``IndexError`` if the node is not known.
525 Raises ``IndexError`` if the node is not known.
526 """
526 """
527
527
528 def lookup(node):
528 def lookup(node):
529 """Attempt to resolve a value to a node.
529 """Attempt to resolve a value to a node.
530
530
531 Value can be a binary node, hex node, revision number, or a string
531 Value can be a binary node, hex node, revision number, or a string
532 that can be converted to an integer.
532 that can be converted to an integer.
533
533
534 Raises ``error.LookupError`` if a node could not be resolved.
534 Raises ``error.LookupError`` if a node could not be resolved.
535 """
535 """
536
536
537 def linkrev(rev):
537 def linkrev(rev):
538 """Obtain the changeset revision number a revision is linked to."""
538 """Obtain the changeset revision number a revision is linked to."""
539
539
540 def iscensored(rev):
540 def iscensored(rev):
541 """Return whether a revision's content has been censored."""
541 """Return whether a revision's content has been censored."""
542
542
543 def commonancestorsheads(node1, node2):
543 def commonancestorsheads(node1, node2):
544 """Obtain an iterable of nodes containing heads of common ancestors.
544 """Obtain an iterable of nodes containing heads of common ancestors.
545
545
546 See ``ancestor.commonancestorsheads()``.
546 See ``ancestor.commonancestorsheads()``.
547 """
547 """
548
548
549 def descendants(revs):
549 def descendants(revs):
550 """Obtain descendant revision numbers for a set of revision numbers.
550 """Obtain descendant revision numbers for a set of revision numbers.
551
551
552 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
552 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
553 """
553 """
554
554
555 def heads(start=None, stop=None):
555 def heads(start=None, stop=None):
556 """Obtain a list of nodes that are DAG heads, with control.
556 """Obtain a list of nodes that are DAG heads, with control.
557
557
558 The set of revisions examined can be limited by specifying
558 The set of revisions examined can be limited by specifying
559 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
559 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
560 iterable of nodes. DAG traversal starts at earlier revision
560 iterable of nodes. DAG traversal starts at earlier revision
561 ``start`` and iterates forward until any node in ``stop`` is
561 ``start`` and iterates forward until any node in ``stop`` is
562 encountered.
562 encountered.
563 """
563 """
564
564
565 def children(node):
565 def children(node):
566 """Obtain nodes that are children of a node.
566 """Obtain nodes that are children of a node.
567
567
568 Returns a list of nodes.
568 Returns a list of nodes.
569 """
569 """
570
570
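# Illustrative sketch (hypothetical helper, not part of this module): a DAG
# walk using only the ifileindex methods documented above. It assumes the
# usual Mercurial convention that a missing parent is revision -1 (nullrev).
def filerootnodes(store):
    roots = []
    for rev in store.revs():
        p1rev, p2rev = store.parentrevs(rev)
        if p1rev == -1 and p2rev == -1:
            roots.append(store.node(rev))
    return roots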
571 class ifiledata(interfaceutil.Interface):
571 class ifiledata(interfaceutil.Interface):
572 """Storage interface for data storage of a specific file.
572 """Storage interface for data storage of a specific file.
573
573
574 This complements ``ifileindex`` and provides an interface for accessing
574 This complements ``ifileindex`` and provides an interface for accessing
575 data for a tracked file.
575 data for a tracked file.
576 """
576 """
577 def size(rev):
577 def size(rev):
578 """Obtain the fulltext size of file data.
578 """Obtain the fulltext size of file data.
579
579
580 Any metadata is excluded from size measurements.
580 Any metadata is excluded from size measurements.
581 """
581 """
582
582
583 def revision(node, raw=False):
583 def revision(node, raw=False):
584 """"Obtain fulltext data for a node.
584 """"Obtain fulltext data for a node.
585
585
586 By default, any storage transformations are applied before the data
586 By default, any storage transformations are applied before the data
587 is returned. If ``raw`` is True, non-raw storage transformations
587 is returned. If ``raw`` is True, non-raw storage transformations
588 are not applied.
588 are not applied.
589
589
590 The fulltext data may contain a header containing metadata. Most
590 The fulltext data may contain a header containing metadata. Most
591 consumers should use ``read()`` to obtain the actual file data.
591 consumers should use ``read()`` to obtain the actual file data.
592 """
592 """
593
593
594 def read(node):
594 def read(node):
595 """Resolve file fulltext data.
595 """Resolve file fulltext data.
596
596
597 This is similar to ``revision()`` except any metadata in the data
597 This is similar to ``revision()`` except any metadata in the data
598 headers is stripped.
598 headers is stripped.
599 """
599 """
600
600
601 def renamed(node):
601 def renamed(node):
602 """Obtain copy metadata for a node.
602 """Obtain copy metadata for a node.
603
603
604 Returns ``False`` if no copy metadata is stored or a 2-tuple of
604 Returns ``False`` if no copy metadata is stored or a 2-tuple of
605 (path, node) from which this revision was copied.
605 (path, node) from which this revision was copied.
606 """
606 """
607
607
608 def cmp(node, fulltext):
608 def cmp(node, fulltext):
609 """Compare fulltext to another revision.
609 """Compare fulltext to another revision.
610
610
611 Returns True if the fulltext is different from what is stored.
611 Returns True if the fulltext is different from what is stored.
612
612
613 This takes copy metadata into account.
613 This takes copy metadata into account.
614
614
615 TODO better document the copy metadata and censoring logic.
615 TODO better document the copy metadata and censoring logic.
616 """
616 """
617
617
618 def emitrevisions(nodes,
618 def emitrevisions(nodes,
619 nodesorder=None,
619 nodesorder=None,
620 revisiondata=False,
620 revisiondata=False,
621 assumehaveparentrevisions=False,
621 assumehaveparentrevisions=False,
622 deltamode=CG_DELTAMODE_STD):
622 deltamode=CG_DELTAMODE_STD):
623 """Produce ``irevisiondelta`` for revisions.
623 """Produce ``irevisiondelta`` for revisions.
624
624
625 Given an iterable of nodes, emits objects conforming to the
625 Given an iterable of nodes, emits objects conforming to the
626 ``irevisiondelta`` interface that describe revisions in storage.
626 ``irevisiondelta`` interface that describe revisions in storage.
627
627
628 This method is a generator.
628 This method is a generator.
629
629
630 The input nodes may be unordered. Implementations must ensure that a
630 The input nodes may be unordered. Implementations must ensure that a
631 node's parents are emitted before the node itself. Transitively, this
631 node's parents are emitted before the node itself. Transitively, this
632 means that a node may only be emitted once all its ancestors in
632 means that a node may only be emitted once all its ancestors in
633 ``nodes`` have also been emitted.
633 ``nodes`` have also been emitted.
634
634
635 By default, emits "index" data (the ``node``, ``p1node``, and
635 By default, emits "index" data (the ``node``, ``p1node``, and
636 ``p2node`` attributes). If ``revisiondata`` is set, revision data
636 ``p2node`` attributes). If ``revisiondata`` is set, revision data
637 will also be present on the emitted objects.
637 will also be present on the emitted objects.
638
638
639 With default argument values, implementations can choose to emit
639 With default argument values, implementations can choose to emit
640 either fulltext revision data or a delta. When emitting deltas,
640 either fulltext revision data or a delta. When emitting deltas,
641 implementations must consider whether the delta's base revision
641 implementations must consider whether the delta's base revision
642 fulltext is available to the receiver.
642 fulltext is available to the receiver.
643
643
644 The base revision fulltext is guaranteed to be available if any of
644 The base revision fulltext is guaranteed to be available if any of
645 the following are met:
645 the following are met:
646
646
647 * Its fulltext revision was emitted by this method call.
647 * Its fulltext revision was emitted by this method call.
648 * A delta for that revision was emitted by this method call.
648 * A delta for that revision was emitted by this method call.
649 * ``assumehaveparentrevisions`` is True and the base revision is a
649 * ``assumehaveparentrevisions`` is True and the base revision is a
650 parent of the node.
650 parent of the node.
651
651
652 ``nodesorder`` can be used to control the order that revisions are
652 ``nodesorder`` can be used to control the order that revisions are
653 emitted. By default, revisions can be reordered as long as they are
653 emitted. By default, revisions can be reordered as long as they are
654 in DAG topological order (see above). If the value is ``nodes``,
654 in DAG topological order (see above). If the value is ``nodes``,
655 the iteration order from ``nodes`` should be used. If the value is
655 the iteration order from ``nodes`` should be used. If the value is
656 ``storage``, then the native order from the backing storage layer
656 ``storage``, then the native order from the backing storage layer
657 is used. (Not all storage layers will have strong ordering, and the
657 is used. (Not all storage layers will have strong ordering, and the
658 behavior of this mode is storage-dependent.) ``nodes`` ordering can force
658 behavior of this mode is storage-dependent.) ``nodes`` ordering can force
659 revisions to be emitted before their ancestors, so consumers should
659 revisions to be emitted before their ancestors, so consumers should
660 use it with care.
660 use it with care.
661
661
662 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
662 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
663 be set and it is the caller's responsibility to resolve it, if needed.
663 be set and it is the caller's responsibility to resolve it, if needed.
664
664
665 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
665 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
666 all revision data should be emitted as deltas against the revision
666 all revision data should be emitted as deltas against the revision
667 emitted just prior. The initial revision should be a delta against its
667 emitted just prior. The initial revision should be a delta against its
668 1st parent.
668 1st parent.
669 """
669 """
670
670
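# Illustrative sketch (hypothetical helper, not part of this module):
# consuming emitrevisions() as documented above. With default ordering,
# parents are emitted before descendants, so the collected entries are safe
# to apply sequentially. It assumes the unused attribute (``revision`` or
# ``delta``) is None on the emitted objects.
def collectrevisions(store, nodes):
    out = []
    for rev in store.emitrevisions(nodes, revisiondata=True):
        payload = rev.revision if rev.delta is None else rev.delta
        out.append((rev.node, rev.basenode, payload))
    return out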
671 class ifilemutation(interfaceutil.Interface):
671 class ifilemutation(interfaceutil.Interface):
672 """Storage interface for mutation events of a tracked file."""
672 """Storage interface for mutation events of a tracked file."""
673
673
674 def add(filedata, meta, transaction, linkrev, p1, p2):
674 def add(filedata, meta, transaction, linkrev, p1, p2):
675 """Add a new revision to the store.
675 """Add a new revision to the store.
676
676
677 Takes file data, dictionary of metadata, a transaction, linkrev,
677 Takes file data, dictionary of metadata, a transaction, linkrev,
678 and parent nodes.
678 and parent nodes.
679
679
680 Returns the node that was added.
680 Returns the node that was added.
681
681
682 May no-op if a revision matching the supplied data is already stored.
682 May no-op if a revision matching the supplied data is already stored.
683 """
683 """
684
684
685 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
685 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
686 flags=0, cachedelta=None):
686 flags=0, cachedelta=None):
687 """Add a new revision to the store.
687 """Add a new revision to the store.
688
688
689 This is similar to ``add()`` except it operates at a lower level.
689 This is similar to ``add()`` except it operates at a lower level.
690
690
691 The data passed in already contains a metadata header, if any.
691 The data passed in already contains a metadata header, if any.
692
692
693 ``node`` and ``flags`` can be used to define the expected node and
693 ``node`` and ``flags`` can be used to define the expected node and
694 the flags to use with storage. ``flags`` is a bitwise value composed
694 the flags to use with storage. ``flags`` is a bitwise value composed
695 of the various ``REVISION_FLAG_*`` constants.
695 of the various ``REVISION_FLAG_*`` constants.
696
696
697 ``add()`` is usually called when adding files from e.g. the working
697 ``add()`` is usually called when adding files from e.g. the working
698 directory. ``addrevision()`` is often called by ``add()`` and for
698 directory. ``addrevision()`` is often called by ``add()`` and for
699 scenarios where revision data has already been computed, such as when
699 scenarios where revision data has already been computed, such as when
700 applying raw data from a peer repo.
700 applying raw data from a peer repo.
701 """
701 """
702
702
703 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
703 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
704 maybemissingparents=False):
704 maybemissingparents=False):
705 """Process a series of deltas for storage.
705 """Process a series of deltas for storage.
706
706
707 ``deltas`` is an iterable of 7-tuples of
707 ``deltas`` is an iterable of 7-tuples of
708 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
708 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
709 to add.
709 to add.
710
710
711 The ``delta`` field contains ``mpatch`` data to apply to a base
711 The ``delta`` field contains ``mpatch`` data to apply to a base
712 revision, identified by ``deltabase``. The base node can be
712 revision, identified by ``deltabase``. The base node can be
713 ``nullid``, in which case the header from the delta can be ignored
713 ``nullid``, in which case the header from the delta can be ignored
714 and the delta used as the fulltext.
714 and the delta used as the fulltext.
715
715
716 ``addrevisioncb`` should be called for each node as it is committed.
716 ``addrevisioncb`` should be called for each node as it is committed.
717
717
718 ``maybemissingparents`` is a bool indicating whether the incoming
718 ``maybemissingparents`` is a bool indicating whether the incoming
719 data may reference parents/ancestor revisions that aren't present.
719 data may reference parents/ancestor revisions that aren't present.
720 This flag is set when receiving data into a "shallow" store that
720 This flag is set when receiving data into a "shallow" store that
721 doesn't hold all history.
721 doesn't hold all history.
722
722
723 Returns a list of nodes that were processed. A node will be in the list
723 Returns a list of nodes that were processed. A node will be in the list
724 even if it existed in the store previously.
724 even if it existed in the store previously.
725 """
725 """
726
726
727 def censorrevision(tr, node, tombstone=b''):
727 def censorrevision(tr, node, tombstone=b''):
728 """Remove the content of a single revision.
728 """Remove the content of a single revision.
729
729
730 The specified ``node`` will have its content purged from storage.
730 The specified ``node`` will have its content purged from storage.
731 Future attempts to access the revision data for this node will
731 Future attempts to access the revision data for this node will
732 result in failure.
732 result in failure.
733
733
734 A ``tombstone`` message can optionally be stored. This message may be
734 A ``tombstone`` message can optionally be stored. This message may be
735 displayed to users when they attempt to access the missing revision
735 displayed to users when they attempt to access the missing revision
736 data.
736 data.
737
737
738 Storage backends may have stored deltas against the previous content
738 Storage backends may have stored deltas against the previous content
739 in this revision. As part of censoring a revision, these storage
739 in this revision. As part of censoring a revision, these storage
740 backends are expected to rewrite any internally stored deltas such
740 backends are expected to rewrite any internally stored deltas such
741 that they no longer reference the deleted content.
741 that they no longer reference the deleted content.
742 """
742 """
743
743
744 def getstrippoint(minlink):
744 def getstrippoint(minlink):
745 """Find the minimum revision that must be stripped to strip a linkrev.
745 """Find the minimum revision that must be stripped to strip a linkrev.
746
746
747 Returns a 2-tuple containing the minimum revision number and a set
747 Returns a 2-tuple containing the minimum revision number and a set
748 of all revision numbers that would be broken by this strip.
748 of all revision numbers that would be broken by this strip.
749
749
750 TODO this is highly revlog centric and should be abstracted into
750 TODO this is highly revlog centric and should be abstracted into
751 a higher-level deletion API. ``repair.strip()`` relies on this.
751 a higher-level deletion API. ``repair.strip()`` relies on this.
752 """
752 """
753
753
754 def strip(minlink, transaction):
754 def strip(minlink, transaction):
755 """Remove storage of items starting at a linkrev.
755 """Remove storage of items starting at a linkrev.
756
756
757 This uses ``getstrippoint()`` to determine the first node to remove.
757 This uses ``getstrippoint()`` to determine the first node to remove.
758 Then it effectively truncates storage for all revisions after that.
758 Then it effectively truncates storage for all revisions after that.
759
759
760 TODO this is highly revlog centric and should be abstracted into a
760 TODO this is highly revlog centric and should be abstracted into a
761 higher-level deletion API.
761 higher-level deletion API.
762 """
762 """
763
763
764 class ifilestorage(ifileindex, ifiledata, ifilemutation):
764 class ifilestorage(ifileindex, ifiledata, ifilemutation):
765 """Complete storage interface for a single tracked file."""
765 """Complete storage interface for a single tracked file."""
766
766
767 def files():
767 def files():
768 """Obtain paths that are backing storage for this file.
768 """Obtain paths that are backing storage for this file.
769
769
770 TODO this is used heavily by verify code and there should probably
770 TODO this is used heavily by verify code and there should probably
771 be a better API for that.
771 be a better API for that.
772 """
772 """
773
773
774 def storageinfo(exclusivefiles=False, sharedfiles=False,
774 def storageinfo(exclusivefiles=False, sharedfiles=False,
775 revisionscount=False, trackedsize=False,
775 revisionscount=False, trackedsize=False,
776 storedsize=False):
776 storedsize=False):
777 """Obtain information about storage for this file's data.
777 """Obtain information about storage for this file's data.
778
778
779 Returns a dict describing storage for this tracked path. The keys
779 Returns a dict describing storage for this tracked path. The keys
780 in the dict map to arguments of the same name. The arguments are bools
780 in the dict map to arguments of the same name. The arguments are bools
781 indicating whether to calculate and obtain that data.
781 indicating whether to calculate and obtain that data.
782
782
783 exclusivefiles
783 exclusivefiles
784 Iterable of (vfs, path) describing files that are exclusively
784 Iterable of (vfs, path) describing files that are exclusively
785 used to back storage for this tracked path.
785 used to back storage for this tracked path.
786
786
787 sharedfiles
787 sharedfiles
788 Iterable of (vfs, path) describing files that are used to back
788 Iterable of (vfs, path) describing files that are used to back
789 storage for this tracked path. Those files may also provide storage
789 storage for this tracked path. Those files may also provide storage
790 for other stored entities.
790 for other stored entities.
791
791
792 revisionscount
792 revisionscount
793 Number of revisions available for retrieval.
793 Number of revisions available for retrieval.
794
794
795 trackedsize
795 trackedsize
796 Total size in bytes of all tracked revisions. This is a sum of the
796 Total size in bytes of all tracked revisions. This is a sum of the
797 length of the fulltext of all revisions.
797 length of the fulltext of all revisions.
798
798
799 storedsize
799 storedsize
800 Total size in bytes used to store data for all tracked revisions.
800 Total size in bytes used to store data for all tracked revisions.
801 This is commonly less than ``trackedsize`` due to internal usage
801 This is commonly less than ``trackedsize`` due to internal usage
802 of deltas rather than fulltext revisions.
802 of deltas rather than fulltext revisions.
803
803
804 Not all storage backends may support all queries or have a reasonable
804 Not all storage backends may support all queries or have a reasonable
805 value to use. In that case, the value should be set to ``None`` and
805 value to use. In that case, the value should be set to ``None`` and
806 callers are expected to handle this special value.
806 callers are expected to handle this special value.
807 """
807 """
808
808
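# Illustrative sketch, not part of the interface: querying ``storageinfo()``
# on an object conforming to ``ifilestorage``. ``fl`` is a hypothetical
# conforming instance supplied by the caller.
def _example_storageinfo(fl):
    info = fl.storageinfo(revisionscount=True, trackedsize=True,
                          storedsize=True)
    # Each requested key maps back to the argument of the same name; a value
    # of None means the backend could not compute that metric.
    for key, value in info.items():
        if value is not None:
            print('%s: %s' % (key, value))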
809 def verifyintegrity(state):
809 def verifyintegrity(state):
810 """Verifies the integrity of file storage.
810 """Verifies the integrity of file storage.
811
811
812 ``state`` is a dict holding state of the verifier process. It can be
812 ``state`` is a dict holding state of the verifier process. It can be
813 used to communicate data between invocations of multiple storage
813 used to communicate data between invocations of multiple storage
814 primitives.
814 primitives.
815
815
816 If individual revisions cannot have their revision content resolved,
816 If individual revisions cannot have their revision content resolved,
817 the method is expected to set the ``skipread`` key to a set of nodes
817 the method is expected to set the ``skipread`` key to a set of nodes
818 that encountered problems.
818 that encountered problems.
819
819
820 The method yields objects conforming to the ``iverifyproblem``
820 The method yields objects conforming to the ``iverifyproblem``
821 interface.
821 interface.
822 """
822 """
823
823
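# Illustrative sketch, not part of the interface: driving ``verifyintegrity()``
# on an ``ifilestorage`` object ``fl``. The ``state`` layout here (only a
# ``skipread`` set, named per the docstring above) is a simplification of the
# verifier's real shared state.
def _example_verifyintegrity(fl):
    state = {b'skipread': set()}
    # Each yielded object conforms to ``iverifyproblem``; nodes whose content
    # could not be resolved end up in state[b'skipread'].
    problems = list(fl.verifyintegrity(state))
    return problems, state[b'skipread']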
824 class idirs(interfaceutil.Interface):
824 class idirs(interfaceutil.Interface):
825 """Interface representing a collection of directories from paths.
825 """Interface representing a collection of directories from paths.
826
826
827 This interface is essentially a derived data structure representing
827 This interface is essentially a derived data structure representing
828 directories from a collection of paths.
828 directories from a collection of paths.
829 """
829 """
830
830
831 def addpath(path):
831 def addpath(path):
832 """Add a path to the collection.
832 """Add a path to the collection.
833
833
834 All directories in the path will be added to the collection.
834 All directories in the path will be added to the collection.
835 """
835 """
836
836
837 def delpath(path):
837 def delpath(path):
838 """Remove a path from the collection.
838 """Remove a path from the collection.
839
839
840 If the removed path was the last path in a particular directory, the
840 If the removed path was the last path in a particular directory, the
841 directory is removed from the collection.
841 directory is removed from the collection.
842 """
842 """
843
843
844 def __iter__():
844 def __iter__():
845 """Iterate over the directories in this collection of paths."""
845 """Iterate over the directories in this collection of paths."""
846
846
847 def __contains__(path):
847 def __contains__(path):
848 """Whether a specific directory is in this collection."""
848 """Whether a specific directory is in this collection."""
849
849
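# Illustrative sketch, not part of the interface: exercising an object ``d``
# implementing ``idirs``. Only the methods documented above are used; the
# concrete implementation is assumed to be supplied by the caller.
def _example_dirs(d):
    d.addpath(b'a/b/c.txt')   # registers the ancestor directories of the path
    assert b'a/b' in d        # __contains__ checks a specific directory
    for directory in d:       # __iter__ walks all known directories
        print(directory)
    d.delpath(b'a/b/c.txt')   # last path gone, so its directories are pruned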
850 class imanifestdict(interfaceutil.Interface):
850 class imanifestdict(interfaceutil.Interface):
851 """Interface representing a manifest data structure.
851 """Interface representing a manifest data structure.
852
852
853 A manifest is effectively a dict mapping paths to entries. Each entry
853 A manifest is effectively a dict mapping paths to entries. Each entry
854 consists of a binary node and extra flags affecting that entry.
854 consists of a binary node and extra flags affecting that entry.
855 """
855 """
856
856
857 def __getitem__(path):
857 def __getitem__(path):
858 """Returns the binary node value for a path in the manifest.
858 """Returns the binary node value for a path in the manifest.
859
859
860 Raises ``KeyError`` if the path does not exist in the manifest.
860 Raises ``KeyError`` if the path does not exist in the manifest.
861
861
862 Equivalent to ``self.find(path)[0]``.
862 Equivalent to ``self.find(path)[0]``.
863 """
863 """
864
864
865 def find(path):
865 def find(path):
866 """Returns the entry for a path in the manifest.
866 """Returns the entry for a path in the manifest.
867
867
868 Returns a 2-tuple of (node, flags).
868 Returns a 2-tuple of (node, flags).
869
869
870 Raises ``KeyError`` if the path does not exist in the manifest.
870 Raises ``KeyError`` if the path does not exist in the manifest.
871 """
871 """
872
872
873 def __len__():
873 def __len__():
874 """Return the number of entries in the manifest."""
874 """Return the number of entries in the manifest."""
875
875
876 def __nonzero__():
876 def __nonzero__():
877 """Returns True if the manifest has entries, False otherwise."""
877 """Returns True if the manifest has entries, False otherwise."""
878
878
879 __bool__ = __nonzero__
879 __bool__ = __nonzero__
880
880
881 def __setitem__(path, node):
881 def __setitem__(path, node):
882 """Define the node value for a path in the manifest.
882 """Define the node value for a path in the manifest.
883
883
884 If the path is already in the manifest, its flags will be copied to
884 If the path is already in the manifest, its flags will be copied to
885 the new entry.
885 the new entry.
886 """
886 """
887
887
888 def __contains__(path):
888 def __contains__(path):
889 """Whether a path exists in the manifest."""
889 """Whether a path exists in the manifest."""
890
890
891 def __delitem__(path):
891 def __delitem__(path):
892 """Remove a path from the manifest.
892 """Remove a path from the manifest.
893
893
894 Raises ``KeyError`` if the path is not in the manifest.
894 Raises ``KeyError`` if the path is not in the manifest.
895 """
895 """
896
896
897 def __iter__():
897 def __iter__():
898 """Iterate over paths in the manifest."""
898 """Iterate over paths in the manifest."""
899
899
900 def iterkeys():
900 def iterkeys():
901 """Iterate over paths in the manifest."""
901 """Iterate over paths in the manifest."""
902
902
903 def keys():
903 def keys():
904 """Obtain a list of paths in the manifest."""
904 """Obtain a list of paths in the manifest."""
905
905
906 def filesnotin(other, match=None):
906 def filesnotin(other, match=None):
907 """Obtain the set of paths in this manifest but not in another.
907 """Obtain the set of paths in this manifest but not in another.
908
908
909 ``match`` is an optional matcher function to be applied to both
909 ``match`` is an optional matcher function to be applied to both
910 manifests.
910 manifests.
911
911
912 Returns a set of paths.
912 Returns a set of paths.
913 """
913 """
914
914
915 def dirs():
915 def dirs():
916 """Returns an object implementing the ``idirs`` interface."""
916 """Returns an object implementing the ``idirs`` interface."""
917
917
918 def hasdir(dir):
918 def hasdir(dir):
919 """Returns a bool indicating if a directory is in this manifest."""
919 """Returns a bool indicating if a directory is in this manifest."""
920
920
921 def matches(match):
921 def matches(match):
922 """Generate a new manifest filtered through a matcher.
922 """Generate a new manifest filtered through a matcher.
923
923
924 Returns an object conforming to the ``imanifestdict`` interface.
924 Returns an object conforming to the ``imanifestdict`` interface.
925 """
925 """
926
926
927 def walk(match):
927 def walk(match):
928 """Generator of paths in manifest satisfying a matcher.
928 """Generator of paths in manifest satisfying a matcher.
929
929
930 This is equivalent to ``self.matches(match).iterkeys()`` except a new
930 This is equivalent to ``self.matches(match).iterkeys()`` except a new
931 manifest object is not created.
931 manifest object is not created.
932
932
933 If the matcher has explicit files listed and they don't exist in
933 If the matcher has explicit files listed and they don't exist in
934 the manifest, ``match.bad()`` is called for each missing file.
934 the manifest, ``match.bad()`` is called for each missing file.
935 """
935 """
936
936
937 def diff(other, match=None, clean=False):
937 def diff(other, match=None, clean=False):
938 """Find differences between this manifest and another.
938 """Find differences between this manifest and another.
939
939
940 This manifest is compared to ``other``.
940 This manifest is compared to ``other``.
941
941
942 If ``match`` is provided, the two manifests are filtered against this
942 If ``match`` is provided, the two manifests are filtered against this
943 matcher and only entries satisfying the matcher are compared.
943 matcher and only entries satisfying the matcher are compared.
944
944
945 If ``clean`` is True, unchanged files are included in the returned
945 If ``clean`` is True, unchanged files are included in the returned
946 object.
946 object.
947
947
948 Returns a dict with paths as keys and, as values, 2-tuples of 2-tuples of
948 Returns a dict with paths as keys and, as values, 2-tuples of 2-tuples of
949 the form ``((node1, flag1), (node2, flag2))``, where ``(node1, flag1)``
949 the form ``((node1, flag1), (node2, flag2))``, where ``(node1, flag1)``
950 represents the node and flags for this manifest and ``(node2, flag2)``
950 represents the node and flags for this manifest and ``(node2, flag2)``
951 are the same for the other manifest.
951 are the same for the other manifest.
952 """
952 """
953
953
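# Illustrative sketch, not part of the interface: consuming ``diff()`` output
# from two conforming manifest objects ``m1`` and ``m2``.
def _example_manifestdiff(m1, m2):
    changed = []
    for path, ((node1, flag1), (node2, flag2)) in m1.diff(m2).items():
        # (node1, flag1) describes the entry in m1; (node2, flag2) describes
        # the same path in m2.
        changed.append((path, node1, flag1, node2, flag2))
    return changed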
954 def setflag(path, flag):
954 def setflag(path, flag):
955 """Set the flag value for a given path.
955 """Set the flag value for a given path.
956
956
957 Raises ``KeyError`` if the path is not already in the manifest.
957 Raises ``KeyError`` if the path is not already in the manifest.
958 """
958 """
959
959
960 def get(path, default=None):
960 def get(path, default=None):
961 """Obtain the node value for a path or a default value if missing."""
961 """Obtain the node value for a path or a default value if missing."""
962
962
963 def flags(path, default=''):
963 def flags(path, default=''):
964 """Return the flags value for a path or a default value if missing."""
964 """Return the flags value for a path or a default value if missing."""
965
965
966 def copy():
966 def copy():
967 """Return a copy of this manifest."""
967 """Return a copy of this manifest."""
968
968
969 def items():
969 def items():
970 """Returns an iterable of (path, node) for items in this manifest."""
970 """Returns an iterable of (path, node) for items in this manifest."""
971
971
972 def iteritems():
972 def iteritems():
973 """Identical to items()."""
973 """Identical to items()."""
974
974
975 def iterentries():
975 def iterentries():
976 """Returns an iterable of (path, node, flags) for this manifest.
976 """Returns an iterable of (path, node, flags) for this manifest.
977
977
978 Similar to ``iteritems()`` except each item is a 3-tuple and includes
978 Similar to ``iteritems()`` except each item is a 3-tuple and includes
979 flags.
979 flags.
980 """
980 """
981
981
982 def text():
982 def text():
983 """Obtain the raw data representation for this manifest.
983 """Obtain the raw data representation for this manifest.
984
984
985 Result is used to create a manifest revision.
985 Result is used to create a manifest revision.
986 """
986 """
987
987
988 def fastdelta(base, changes):
988 def fastdelta(base, changes):
989 """Obtain a delta between this manifest and another given changes.
989 """Obtain a delta between this manifest and another given changes.
990
990
991 ``base`` is the raw data representation of another manifest.
991 ``base`` is the raw data representation of another manifest.
992
992
993 ``changes`` is an iterable of ``(path, to_delete)``.
993 ``changes`` is an iterable of ``(path, to_delete)``.
994
994
995 Returns a 2-tuple containing ``bytearray(self.text())`` and the
995 Returns a 2-tuple containing ``bytearray(self.text())`` and the
996 delta between ``base`` and this manifest.
996 delta between ``base`` and this manifest.
997 """
997 """
998
998
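# Illustrative sketch, not part of the interface: basic manipulation of an
# object ``m`` conforming to ``imanifestdict``. The node value is a
# placeholder, not a real hash.
def _example_manifestdict(m):
    fakenode = b'\x00' * 20            # placeholder 20-byte binary node
    m[b'dir/file.txt'] = fakenode      # __setitem__ preserves existing flags
    assert b'dir/file.txt' in m
    node, flags = m.find(b'dir/file.txt')
    m.setflag(b'dir/file.txt', b'x')   # path must already be in the manifest
    return node, flags, m.text()       # text() is the raw revision payload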
999 class imanifestrevisionbase(interfaceutil.Interface):
999 class imanifestrevisionbase(interfaceutil.Interface):
1000 """Base interface representing a single revision of a manifest.
1000 """Base interface representing a single revision of a manifest.
1001
1001
1002 Should not be used as a primary interface: should always be inherited
1002 Should not be used as a primary interface: should always be inherited
1003 as part of a larger interface.
1003 as part of a larger interface.
1004 """
1004 """
1005
1005
1006 def new():
1006 def new():
1007 """Obtain a new manifest instance.
1007 """Obtain a new manifest instance.
1008
1008
1009 Returns an object conforming to the ``imanifestrevisionwritable``
1009 Returns an object conforming to the ``imanifestrevisionwritable``
1010 interface. The instance will be associated with the same
1010 interface. The instance will be associated with the same
1011 ``imanifestlog`` collection as this instance.
1011 ``imanifestlog`` collection as this instance.
1012 """
1012 """
1013
1013
1014 def copy():
1014 def copy():
1015 """Obtain a copy of this manifest instance.
1015 """Obtain a copy of this manifest instance.
1016
1016
1017 Returns an object conforming to the ``imanifestrevisionwritable``
1017 Returns an object conforming to the ``imanifestrevisionwritable``
1018 interface. The instance will be associated with the same
1018 interface. The instance will be associated with the same
1019 ``imanifestlog`` collection as this instance.
1019 ``imanifestlog`` collection as this instance.
1020 """
1020 """
1021
1021
1022 def read():
1022 def read():
1023 """Obtain the parsed manifest data structure.
1023 """Obtain the parsed manifest data structure.
1024
1024
1025 The returned object conforms to the ``imanifestdict`` interface.
1025 The returned object conforms to the ``imanifestdict`` interface.
1026 """
1026 """
1027
1027
1028 class imanifestrevisionstored(imanifestrevisionbase):
1028 class imanifestrevisionstored(imanifestrevisionbase):
1029 """Interface representing a manifest revision committed to storage."""
1029 """Interface representing a manifest revision committed to storage."""
1030
1030
1031 def node():
1031 def node():
1032 """The binary node for this manifest."""
1032 """The binary node for this manifest."""
1033
1033
1034 parents = interfaceutil.Attribute(
1034 parents = interfaceutil.Attribute(
1035 """List of binary nodes that are parents for this manifest revision."""
1035 """List of binary nodes that are parents for this manifest revision."""
1036 )
1036 )
1037
1037
1038 def readdelta(shallow=False):
1038 def readdelta(shallow=False):
1039 """Obtain the manifest data structure representing changes from parent.
1039 """Obtain the manifest data structure representing changes from parent.
1040
1040
1041 This manifest is compared to its first parent. A new manifest representing
1041 This manifest is compared to its first parent. A new manifest representing
1042 those differences is constructed.
1042 those differences is constructed.
1043
1043
1044 The returned object conforms to the ``imanifestdict`` interface.
1044 The returned object conforms to the ``imanifestdict`` interface.
1045 """
1045 """
1046
1046
1047 def readfast(shallow=False):
1047 def readfast(shallow=False):
1048 """Calls either ``read()`` or ``readdelta()``.
1048 """Calls either ``read()`` or ``readdelta()``.
1049
1049
1050 The faster of the two options is called.
1050 The faster of the two options is called.
1051 """
1051 """
1052
1052
1053 def find(key):
1053 def find(key):
1054 """Calls self.read().find(key)``.
1054 """Calls self.read().find(key)``.
1055
1055
1056 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1056 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1057 """
1057 """
1058
1058
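# Illustrative sketch, not part of the interface: reading a stored manifest
# revision ``mrev`` conforming to ``imanifestrevisionstored``.
def _example_readstored(mrev):
    full = mrev.read()         # complete ``imanifestdict``
    delta = mrev.readdelta()   # entries changed relative to the first parent
    fast = mrev.readfast()     # backend picks the cheaper of the two above
    return len(full), len(delta), len(fast)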
1059 class imanifestrevisionwritable(imanifestrevisionbase):
1059 class imanifestrevisionwritable(imanifestrevisionbase):
1060 """Interface representing a manifest revision that can be committed."""
1060 """Interface representing a manifest revision that can be committed."""
1061
1061
1062 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1062 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1063 """Add this revision to storage.
1063 """Add this revision to storage.
1064
1064
1065 Takes a transaction object, the changeset revision number it will
1065 Takes a transaction object, the changeset revision number it will
1066 be associated with, its parent nodes, and lists of added and
1066 be associated with, its parent nodes, and lists of added and
1067 removed paths.
1067 removed paths.
1068
1068
1069 If match is provided, storage can choose not to inspect or write out
1069 If match is provided, storage can choose not to inspect or write out
1070 items that do not match. Storage is still required to be able to provide
1070 items that do not match. Storage is still required to be able to provide
1071 the full manifest in the future for any directories written (these
1071 the full manifest in the future for any directories written (these
1072 manifests should not be "narrowed on disk").
1072 manifests should not be "narrowed on disk").
1073
1073
1074 Returns the binary node of the created revision.
1074 Returns the binary node of the created revision.
1075 """
1075 """
1076
1076
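# Illustrative sketch, not part of the interface: committing a new manifest
# revision. ``mctx`` is assumed to conform to ``imanifestrevisionwritable``;
# the transaction, linkrev and parent nodes come from the caller. Mutating the
# object returned by ``read()`` before calling ``write()`` is an assumption
# about the backing implementation, not something the interface guarantees.
def _example_writemanifest(mctx, tr, linkrev, p1node, p2node):
    m = mctx.read()                     # parsed ``imanifestdict``
    m[b'new-file.txt'] = b'\x00' * 20   # placeholder node for illustration
    return mctx.write(tr, linkrev, p1node, p2node,
                      added=[b'new-file.txt'], removed=[])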
1077 class imanifeststorage(interfaceutil.Interface):
1077 class imanifeststorage(interfaceutil.Interface):
1078 """Storage interface for manifest data."""
1078 """Storage interface for manifest data."""
1079
1079
1080 tree = interfaceutil.Attribute(
1080 tree = interfaceutil.Attribute(
1081 """The path to the directory this manifest tracks.
1081 """The path to the directory this manifest tracks.
1082
1082
1083 The empty bytestring represents the root manifest.
1083 The empty bytestring represents the root manifest.
1084 """)
1084 """)
1085
1085
1086 index = interfaceutil.Attribute(
1086 index = interfaceutil.Attribute(
1087 """An ``ifilerevisionssequence`` instance.""")
1087 """An ``ifilerevisionssequence`` instance.""")
1088
1088
1089 indexfile = interfaceutil.Attribute(
1089 indexfile = interfaceutil.Attribute(
1090 """Path of revlog index file.
1090 """Path of revlog index file.
1091
1091
1092 TODO this is revlog specific and should not be exposed.
1092 TODO this is revlog specific and should not be exposed.
1093 """)
1093 """)
1094
1094
1095 opener = interfaceutil.Attribute(
1095 opener = interfaceutil.Attribute(
1096 """VFS opener to use to access underlying files used for storage.
1096 """VFS opener to use to access underlying files used for storage.
1097
1097
1098 TODO this is revlog specific and should not be exposed.
1098 TODO this is revlog specific and should not be exposed.
1099 """)
1099 """)
1100
1100
1101 version = interfaceutil.Attribute(
1101 version = interfaceutil.Attribute(
1102 """Revlog version number.
1102 """Revlog version number.
1103
1103
1104 TODO this is revlog specific and should not be exposed.
1104 TODO this is revlog specific and should not be exposed.
1105 """)
1105 """)
1106
1106
1107 _generaldelta = interfaceutil.Attribute(
1107 _generaldelta = interfaceutil.Attribute(
1108 """Whether generaldelta storage is being used.
1108 """Whether generaldelta storage is being used.
1109
1109
1110 TODO this is revlog specific and should not be exposed.
1110 TODO this is revlog specific and should not be exposed.
1111 """)
1111 """)
1112
1112
1113 fulltextcache = interfaceutil.Attribute(
1113 fulltextcache = interfaceutil.Attribute(
1114 """Dict with cache of fulltexts.
1114 """Dict with cache of fulltexts.
1115
1115
1116 TODO this doesn't feel appropriate for the storage interface.
1116 TODO this doesn't feel appropriate for the storage interface.
1117 """)
1117 """)
1118
1118
1119 def __len__():
1119 def __len__():
1120 """Obtain the number of revisions stored for this manifest."""
1120 """Obtain the number of revisions stored for this manifest."""
1121
1121
1122 def __iter__():
1122 def __iter__():
1123 """Iterate over revision numbers for this manifest."""
1123 """Iterate over revision numbers for this manifest."""
1124
1124
1125 def rev(node):
1125 def rev(node):
1126 """Obtain the revision number given a binary node.
1126 """Obtain the revision number given a binary node.
1127
1127
1128 Raises ``error.LookupError`` if the node is not known.
1128 Raises ``error.LookupError`` if the node is not known.
1129 """
1129 """
1130
1130
1131 def node(rev):
1131 def node(rev):
1132 """Obtain the node value given a revision number.
1132 """Obtain the node value given a revision number.
1133
1133
1134 Raises ``error.LookupError`` if the revision is not known.
1134 Raises ``error.LookupError`` if the revision is not known.
1135 """
1135 """
1136
1136
1137 def lookup(value):
1137 def lookup(value):
1138 """Attempt to resolve a value to a node.
1138 """Attempt to resolve a value to a node.
1139
1139
1140 Value can be a binary node, hex node, revision number, or a bytes value
1140 Value can be a binary node, hex node, revision number, or a bytes value
1141 that can be converted to an integer.
1141 that can be converted to an integer.
1142
1142
1143 Raises ``error.LookupError`` if a node could not be resolved.
1143 Raises ``error.LookupError`` if a node could not be resolved.
1144 """
1144 """
1145
1145
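# Illustrative sketch, not part of the interface: round-tripping between
# revision numbers and nodes on an ``imanifeststorage`` object ``store``.
def _example_lookup(store):
    for rev in store:                      # __iter__ yields revision numbers
        node = store.node(rev)             # revision number -> binary node
        assert store.rev(node) == rev      # binary node -> revision number
        assert store.lookup(node) == node  # lookup() also accepts other forms
    # Unknown values cause rev()/node()/lookup() to raise error.LookupError.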
1146 def parents(node):
1146 def parents(node):
1147 """Returns a 2-tuple of parent nodes for a node.
1147 """Returns a 2-tuple of parent nodes for a node.
1148
1148
1149 Values will be ``nullid`` if the parent is empty.
1149 Values will be ``nullid`` if the parent is empty.
1150 """
1150 """
1151
1151
1152 def parentrevs(rev):
1152 def parentrevs(rev):
1153 """Like parents() but operates on revision numbers."""
1153 """Like parents() but operates on revision numbers."""
1154
1154
1155 def linkrev(rev):
1155 def linkrev(rev):
1156 """Obtain the changeset revision number a revision is linked to."""
1156 """Obtain the changeset revision number a revision is linked to."""
1157
1157
1158 def revision(node, _df=None, raw=False):
1158 def revision(node, _df=None, raw=False):
1159 """Obtain fulltext data for a node."""
1159 """Obtain fulltext data for a node."""
1160
1160
1161 def revdiff(rev1, rev2):
1161 def revdiff(rev1, rev2):
1162 """Obtain a delta between two revision numbers.
1162 """Obtain a delta between two revision numbers.
1163
1163
1164 The returned data is the result of ``bdiff.bdiff()`` on the raw
1164 The returned data is the result of ``bdiff.bdiff()`` on the raw
1165 revision data.
1165 revision data.
1166 """
1166 """
1167
1167
1168 def cmp(node, fulltext):
1168 def cmp(node, fulltext):
1169 """Compare fulltext to another revision.
1169 """Compare fulltext to another revision.
1170
1170
1171 Returns True if the fulltext is different from what is stored.
1171 Returns True if the fulltext is different from what is stored.
1172 """
1172 """
1173
1173
1174 def emitrevisions(nodes,
1174 def emitrevisions(nodes,
1175 nodesorder=None,
1175 nodesorder=None,
1176 revisiondata=False,
1176 revisiondata=False,
1177 assumehaveparentrevisions=False):
1177 assumehaveparentrevisions=False):
1178 """Produce ``irevisiondelta`` describing revisions.
1178 """Produce ``irevisiondelta`` describing revisions.
1179
1179
1180 See the documentation for ``ifiledata`` for more.
1180 See the documentation for ``ifiledata`` for more.
1181 """
1181 """
1182
1182
1183 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1183 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1184 """Process a series of deltas for storage.
1184 """Process a series of deltas for storage.
1185
1185
1186 See the documentation in ``ifilemutation`` for more.
1186 See the documentation in ``ifilemutation`` for more.
1187 """
1187 """
1188
1188
1189 def rawsize(rev):
1189 def rawsize(rev):
1190 """Obtain the size of tracked data.
1190 """Obtain the size of tracked data.
1191
1191
1192 Is equivalent to ``len(m.revision(node, raw=True))``.
1192 Is equivalent to ``len(m.revision(node, raw=True))``.
1193
1193
1194 TODO this method is only used by upgrade code and may be removed.
1194 TODO this method is only used by upgrade code and may be removed.
1195 """
1195 """
1196
1196
1197 def getstrippoint(minlink):
1197 def getstrippoint(minlink):
1198 """Find minimum revision that must be stripped to strip a linkrev.
1198 """Find minimum revision that must be stripped to strip a linkrev.
1199
1199
1200 See the documentation in ``ifilemutation`` for more.
1200 See the documentation in ``ifilemutation`` for more.
1201 """
1201 """
1202
1202
1203 def strip(minlink, transaction):
1203 def strip(minlink, transaction):
1204 """Remove storage of items starting at a linkrev.
1204 """Remove storage of items starting at a linkrev.
1205
1205
1206 See the documentation in ``ifilemutation`` for more.
1206 See the documentation in ``ifilemutation`` for more.
1207 """
1207 """
1208
1208
1209 def checksize():
1209 def checksize():
1210 """Obtain the expected sizes of backing files.
1210 """Obtain the expected sizes of backing files.
1211
1211
1212 TODO this is used by verify and it should not be part of the interface.
1212 TODO this is used by verify and it should not be part of the interface.
1213 """
1213 """
1214
1214
1215 def files():
1215 def files():
1216 """Obtain paths that are backing storage for this manifest.
1216 """Obtain paths that are backing storage for this manifest.
1217
1217
1218 TODO this is used by verify and there should probably be a better API
1218 TODO this is used by verify and there should probably be a better API
1219 for this functionality.
1219 for this functionality.
1220 """
1220 """
1221
1221
1222 def deltaparent(rev):
1222 def deltaparent(rev):
1223 """Obtain the revision that a revision is delta'd against.
1223 """Obtain the revision that a revision is delta'd against.
1224
1224
1225 TODO delta encoding is an implementation detail of storage and should
1225 TODO delta encoding is an implementation detail of storage and should
1226 not be exposed to the storage interface.
1226 not be exposed to the storage interface.
1227 """
1227 """
1228
1228
1229 def clone(tr, dest, **kwargs):
1229 def clone(tr, dest, **kwargs):
1230 """Clone this instance to another."""
1230 """Clone this instance to another."""
1231
1231
1232 def clearcaches(clear_persisted_data=False):
1232 def clearcaches(clear_persisted_data=False):
1233 """Clear any caches associated with this instance."""
1233 """Clear any caches associated with this instance."""
1234
1234
1235 def dirlog(d):
1235 def dirlog(d):
1236 """Obtain a manifest storage instance for a tree."""
1236 """Obtain a manifest storage instance for a tree."""
1237
1237
1238 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1238 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1239 match=None):
1239 match=None):
1240 """Add a revision to storage.
1240 """Add a revision to storage.
1241
1241
1242 ``m`` is an object conforming to ``imanifestdict``.
1242 ``m`` is an object conforming to ``imanifestdict``.
1243
1243
1244 ``link`` is the linkrev revision number.
1244 ``link`` is the linkrev revision number.
1245
1245
1246 ``p1`` and ``p2`` are the parent revision numbers.
1246 ``p1`` and ``p2`` are the parent revision numbers.
1247
1247
1248 ``added`` and ``removed`` are iterables of added and removed paths,
1248 ``added`` and ``removed`` are iterables of added and removed paths,
1249 respectively.
1249 respectively.
1250
1250
1251 ``readtree`` is a function that can be used to read the child tree(s)
1251 ``readtree`` is a function that can be used to read the child tree(s)
1252 when recursively writing the full tree structure with
1252 when recursively writing the full tree structure with
1253 treemanifests.
1253 treemanifests.
1254
1254
1255 ``match`` is a matcher that can be used to hint to storage that not all
1255 ``match`` is a matcher that can be used to hint to storage that not all
1256 paths must be inspected; this is an optimization and can be safely
1256 paths must be inspected; this is an optimization and can be safely
1257 ignored. Note that the storage must still be able to reproduce a full
1257 ignored. Note that the storage must still be able to reproduce a full
1258 manifest including files that did not match.
1258 manifest including files that did not match.
1259 """
1259 """
1260
1260
1261 def storageinfo(exclusivefiles=False, sharedfiles=False,
1261 def storageinfo(exclusivefiles=False, sharedfiles=False,
1262 revisionscount=False, trackedsize=False,
1262 revisionscount=False, trackedsize=False,
1263 storedsize=False):
1263 storedsize=False):
1264 """Obtain information about storage for this manifest's data.
1264 """Obtain information about storage for this manifest's data.
1265
1265
1266 See ``ifilestorage.storageinfo()`` for a description of this method.
1266 See ``ifilestorage.storageinfo()`` for a description of this method.
1267 This one behaves the same way, except for manifest data.
1267 This one behaves the same way, except for manifest data.
1268 """
1268 """
1269
1269
1270 class imanifestlog(interfaceutil.Interface):
1270 class imanifestlog(interfaceutil.Interface):
1271 """Interface representing a collection of manifest snapshots.
1271 """Interface representing a collection of manifest snapshots.
1272
1272
1273 Represents the root manifest in a repository.
1273 Represents the root manifest in a repository.
1274
1274
1275 Also serves as a means to access nested tree manifests and to cache
1275 Also serves as a means to access nested tree manifests and to cache
1276 tree manifests.
1276 tree manifests.
1277 """
1277 """
1278
1278
1279 def __getitem__(node):
1279 def __getitem__(node):
1280 """Obtain a manifest instance for a given binary node.
1280 """Obtain a manifest instance for a given binary node.
1281
1281
1282 Equivalent to calling ``self.get('', node)``.
1282 Equivalent to calling ``self.get('', node)``.
1283
1283
1284 The returned object conforms to the ``imanifestrevisionstored``
1284 The returned object conforms to the ``imanifestrevisionstored``
1285 interface.
1285 interface.
1286 """
1286 """
1287
1287
1288 def get(tree, node, verify=True):
1288 def get(tree, node, verify=True):
1289 """Retrieve the manifest instance for a given directory and binary node.
1289 """Retrieve the manifest instance for a given directory and binary node.
1290
1290
1291 ``node`` always refers to the node of the root manifest (which will be
1291 ``node`` always refers to the node of the root manifest (which will be
1292 the only manifest if flat manifests are being used).
1292 the only manifest if flat manifests are being used).
1293
1293
1294 If ``tree`` is the empty string, the root manifest is returned.
1294 If ``tree`` is the empty string, the root manifest is returned.
1295 Otherwise the manifest for the specified directory will be returned
1295 Otherwise the manifest for the specified directory will be returned
1296 (requires tree manifests).
1296 (requires tree manifests).
1297
1297
1298 If ``verify`` is True, ``LookupError`` is raised if the node is not
1298 If ``verify`` is True, ``LookupError`` is raised if the node is not
1299 known.
1299 known.
1300
1300
1301 The returned object conforms to the ``imanifestrevisionstored``
1301 The returned object conforms to the ``imanifestrevisionstored``
1302 interface.
1302 interface.
1303 """
1303 """
1304
1304
1305 def getstorage(tree):
1305 def getstorage(tree):
1306 """Retrieve an interface to storage for a particular tree.
1306 """Retrieve an interface to storage for a particular tree.
1307
1307
1308 If ``tree`` is the empty bytestring, storage for the root manifest will
1308 If ``tree`` is the empty bytestring, storage for the root manifest will
1309 be returned. Otherwise storage for a tree manifest is returned.
1309 be returned. Otherwise storage for a tree manifest is returned.
1310
1310
1311 TODO formalize interface for returned object.
1311 TODO formalize interface for returned object.
1312 """
1312 """
1313
1313
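# Illustrative sketch, not part of the interface: fetching manifests from an
# object ``mlog`` conforming to ``imanifestlog``. ``node`` is assumed to be a
# known root-manifest node.
def _example_manifestlog(mlog, node):
    rootctx = mlog[node]             # equivalent to mlog.get(b'', node)
    m = rootctx.read()               # parsed ``imanifestdict``
    storage = mlog.getstorage(b'')   # storage backing the root manifest
    # With tree manifests, mlog.get(<directory>, node) returns the manifest
    # for that directory; ``node`` still names the root manifest.
    return m, storage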
1314 def clearcaches():
1314 def clearcaches():
1315 """Clear caches associated with this collection."""
1315 """Clear caches associated with this collection."""
1316
1316
1317 def rev(node):
1317 def rev(node):
1318 """Obtain the revision number for a binary node.
1318 """Obtain the revision number for a binary node.
1319
1319
1320 Raises ``error.LookupError`` if the node is not known.
1320 Raises ``error.LookupError`` if the node is not known.
1321 """
1321 """
1322
1322
1323 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1323 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1324 """Local repository sub-interface providing access to tracked file storage.
1324 """Local repository sub-interface providing access to tracked file storage.
1325
1325
1326 This interface defines how a repository accesses storage for a single
1326 This interface defines how a repository accesses storage for a single
1327 tracked file path.
1327 tracked file path.
1328 """
1328 """
1329
1329
1330 def file(f):
1330 def file(f):
1331 """Obtain a filelog for a tracked path.
1331 """Obtain a filelog for a tracked path.
1332
1332
1333 The returned type conforms to the ``ifilestorage`` interface.
1333 The returned type conforms to the ``ifilestorage`` interface.
1334 """
1334 """
1335
1335
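# Illustrative sketch, not part of the interface: obtaining per-file storage
# from a conforming repository object. ``repo`` and the tracked path are
# assumptions supplied by the caller.
def _example_filestorage(repo):
    fl = repo.file(b'README')        # conforms to ``ifilestorage``
    backing = fl.files()             # paths backing this file's storage
    info = fl.storageinfo(revisionscount=True)
    return backing, info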
1336 class ilocalrepositorymain(interfaceutil.Interface):
1336 class ilocalrepositorymain(interfaceutil.Interface):
1337 """Main interface for local repositories.
1337 """Main interface for local repositories.
1338
1338
1339 This currently captures the reality of things - not how things should be.
1339 This currently captures the reality of things - not how things should be.
1340 """
1340 """
1341
1341
1342 supportedformats = interfaceutil.Attribute(
1342 supportedformats = interfaceutil.Attribute(
1343 """Set of requirements that apply to stream clone.
1343 """Set of requirements that apply to stream clone.
1344
1344
1345 This is actually a class attribute and is shared among all instances.
1345 This is actually a class attribute and is shared among all instances.
1346 """)
1346 """)
1347
1347
1348 supported = interfaceutil.Attribute(
1348 supported = interfaceutil.Attribute(
1349 """Set of requirements that this repo is capable of opening.""")
1349 """Set of requirements that this repo is capable of opening.""")
1350
1350
1351 requirements = interfaceutil.Attribute(
1351 requirements = interfaceutil.Attribute(
1352 """Set of requirements this repo uses.""")
1352 """Set of requirements this repo uses.""")
1353
1353
1354 features = interfaceutil.Attribute(
1354 features = interfaceutil.Attribute(
1355 """Set of "features" this repository supports.
1355 """Set of "features" this repository supports.
1356
1356
1357 A "feature" is a loosely-defined term. It can refer to a feature
1357 A "feature" is a loosely-defined term. It can refer to a feature
1358 in the classical sense or can describe an implementation detail
1358 in the classical sense or can describe an implementation detail
1359 of the repository. For example, a ``readonly`` feature may denote
1359 of the repository. For example, a ``readonly`` feature may denote
1360 the repository as read-only. Or a ``revlogfilestore`` feature may
1360 the repository as read-only. Or a ``revlogfilestore`` feature may
1361 denote that the repository is using revlogs for file storage.
1361 denote that the repository is using revlogs for file storage.
1362
1362
1363 The intent of features is to provide a machine-queryable mechanism
1363 The intent of features is to provide a machine-queryable mechanism
1364 for repo consumers to test for various repository characteristics.
1364 for repo consumers to test for various repository characteristics.
1365
1365
1366 Features are similar to ``requirements``. The main difference is that
1366 Features are similar to ``requirements``. The main difference is that
1367 requirements are stored on-disk and represent requirements to open the
1367 requirements are stored on-disk and represent requirements to open the
1368 repository. Features describe run-time capabilities of the repository
1368 repository. Features describe run-time capabilities of the repository
1369 and are more granular (they may be derived from requirements).
1369 and are more granular (they may be derived from requirements).
1370 """)
1370 """)
1371
1371
1372 filtername = interfaceutil.Attribute(
1372 filtername = interfaceutil.Attribute(
1373 """Name of the repoview that is active on this repo.""")
1373 """Name of the repoview that is active on this repo.""")
1374
1374
1375 wvfs = interfaceutil.Attribute(
1375 wvfs = interfaceutil.Attribute(
1376 """VFS used to access the working directory.""")
1376 """VFS used to access the working directory.""")
1377
1377
1378 vfs = interfaceutil.Attribute(
1378 vfs = interfaceutil.Attribute(
1379 """VFS rooted at the .hg directory.
1379 """VFS rooted at the .hg directory.
1380
1380
1381 Used to access repository data not in the store.
1381 Used to access repository data not in the store.
1382 """)
1382 """)
1383
1383
1384 svfs = interfaceutil.Attribute(
1384 svfs = interfaceutil.Attribute(
1385 """VFS rooted at the store.
1385 """VFS rooted at the store.
1386
1386
1387 Used to access repository data in the store. Typically .hg/store.
1387 Used to access repository data in the store. Typically .hg/store.
1388 But can point elsewhere if the store is shared.
1388 But can point elsewhere if the store is shared.
1389 """)
1389 """)
1390
1390
1391 root = interfaceutil.Attribute(
1391 root = interfaceutil.Attribute(
1392 """Path to the root of the working directory.""")
1392 """Path to the root of the working directory.""")
1393
1393
1394 path = interfaceutil.Attribute(
1394 path = interfaceutil.Attribute(
1395 """Path to the .hg directory.""")
1395 """Path to the .hg directory.""")
1396
1396
1397 origroot = interfaceutil.Attribute(
1397 origroot = interfaceutil.Attribute(
1398 """The filesystem path that was used to construct the repo.""")
1398 """The filesystem path that was used to construct the repo.""")
1399
1399
1400 auditor = interfaceutil.Attribute(
1400 auditor = interfaceutil.Attribute(
1401 """A pathauditor for the working directory.
1401 """A pathauditor for the working directory.
1402
1402
1403 This checks if a path refers to a nested repository.
1403 This checks if a path refers to a nested repository.
1404
1404
1405 Operates on the filesystem.
1405 Operates on the filesystem.
1406 """)
1406 """)
1407
1407
1408 nofsauditor = interfaceutil.Attribute(
1408 nofsauditor = interfaceutil.Attribute(
1409 """A pathauditor for the working directory.
1409 """A pathauditor for the working directory.
1410
1410
1411 This is like ``auditor`` except it doesn't do filesystem checks.
1411 This is like ``auditor`` except it doesn't do filesystem checks.
1412 """)
1412 """)
1413
1413
1414 baseui = interfaceutil.Attribute(
1414 baseui = interfaceutil.Attribute(
1415 """Original ui instance passed into constructor.""")
1415 """Original ui instance passed into constructor.""")
1416
1416
1417 ui = interfaceutil.Attribute(
1417 ui = interfaceutil.Attribute(
1418 """Main ui instance for this instance.""")
1418 """Main ui instance for this instance.""")
1419
1419
1420 sharedpath = interfaceutil.Attribute(
1420 sharedpath = interfaceutil.Attribute(
1421 """Path to the .hg directory of the repo this repo was shared from.""")
1421 """Path to the .hg directory of the repo this repo was shared from.""")
1422
1422
1423 store = interfaceutil.Attribute(
1423 store = interfaceutil.Attribute(
1424 """A store instance.""")
1424 """A store instance.""")
1425
1425
1426 spath = interfaceutil.Attribute(
1426 spath = interfaceutil.Attribute(
1427 """Path to the store.""")
1427 """Path to the store.""")
1428
1428
1429 sjoin = interfaceutil.Attribute(
1429 sjoin = interfaceutil.Attribute(
1430 """Alias to self.store.join.""")
1430 """Alias to self.store.join.""")
1431
1431
1432 cachevfs = interfaceutil.Attribute(
1432 cachevfs = interfaceutil.Attribute(
1433 """A VFS used to access the cache directory.
1433 """A VFS used to access the cache directory.
1434
1434
1435 Typically .hg/cache.
1435 Typically .hg/cache.
1436 """)
1436 """)
1437
1437
1438 wcachevfs = interfaceutil.Attribute(
1438 wcachevfs = interfaceutil.Attribute(
1439 """A VFS used to access the cache directory dedicated to working copy
1439 """A VFS used to access the cache directory dedicated to working copy
1440
1440
1441 Typically .hg/wcache.
1441 Typically .hg/wcache.
1442 """)
1442 """)
1443
1443
1444 filteredrevcache = interfaceutil.Attribute(
1444 filteredrevcache = interfaceutil.Attribute(
1445 """Holds sets of revisions to be filtered.""")
1445 """Holds sets of revisions to be filtered.""")
1446
1446
1447 names = interfaceutil.Attribute(
1447 names = interfaceutil.Attribute(
1448 """A ``namespaces`` instance.""")
1448 """A ``namespaces`` instance.""")
1449
1449
1450 def close():
1450 def close():
1451 """Close the handle on this repository."""
1451 """Close the handle on this repository."""
1452
1452
1453 def peer():
1453 def peer():
1454 """Obtain an object conforming to the ``peer`` interface."""
1454 """Obtain an object conforming to the ``peer`` interface."""
1455
1455
1456 def unfiltered():
1456 def unfiltered():
1457 """Obtain an unfiltered/raw view of this repo."""
1457 """Obtain an unfiltered/raw view of this repo."""
1458
1458
1459 def filtered(name, visibilityexceptions=None):
1459 def filtered(name, visibilityexceptions=None):
1460 """Obtain a named view of this repository."""
1460 """Obtain a named view of this repository."""
1461
1461
1462 obsstore = interfaceutil.Attribute(
1462 obsstore = interfaceutil.Attribute(
1463 """A store of obsolescence data.""")
1463 """A store of obsolescence data.""")
1464
1464
1465 changelog = interfaceutil.Attribute(
1465 changelog = interfaceutil.Attribute(
1466 """A handle on the changelog revlog.""")
1466 """A handle on the changelog revlog.""")
1467
1467
1468 manifestlog = interfaceutil.Attribute(
1468 manifestlog = interfaceutil.Attribute(
1469 """An instance conforming to the ``imanifestlog`` interface.
1469 """An instance conforming to the ``imanifestlog`` interface.
1470
1470
1471 Provides access to manifests for the repository.
1471 Provides access to manifests for the repository.
1472 """)
1472 """)
1473
1473
1474 dirstate = interfaceutil.Attribute(
1474 dirstate = interfaceutil.Attribute(
1475 """Working directory state.""")
1475 """Working directory state.""")
1476
1476
1477 narrowpats = interfaceutil.Attribute(
1477 narrowpats = interfaceutil.Attribute(
1478 """Matcher patterns for this repository's narrowspec.""")
1478 """Matcher patterns for this repository's narrowspec.""")
1479
1479
1480 def narrowmatch(match=None, includeexact=False):
1480 def narrowmatch(match=None, includeexact=False):
1481 """Obtain a matcher for the narrowspec."""
1481 """Obtain a matcher for the narrowspec."""
1482
1482
1483 def setnarrowpats(newincludes, newexcludes):
1483 def setnarrowpats(newincludes, newexcludes):
1484 """Define the narrowspec for this repository."""
1484 """Define the narrowspec for this repository."""
1485
1485
1486 def __getitem__(changeid):
1486 def __getitem__(changeid):
1487 """Try to resolve a changectx."""
1487 """Try to resolve a changectx."""
1488
1488
1489 def __contains__(changeid):
1489 def __contains__(changeid):
1490 """Whether a changeset exists."""
1490 """Whether a changeset exists."""
1491
1491
1492 def __nonzero__():
1492 def __nonzero__():
1493 """Always returns True."""
1493 """Always returns True."""
1494 return True
1494 return True
1495
1495
1496 __bool__ = __nonzero__
1496 __bool__ = __nonzero__
1497
1497
1498 def __len__():
1498 def __len__():
1499 """Returns the number of changesets in the repo."""
1499 """Returns the number of changesets in the repo."""
1500
1500
1501 def __iter__():
1501 def __iter__():
1502 """Iterate over revisions in the changelog."""
1502 """Iterate over revisions in the changelog."""
1503
1503
1504 def revs(expr, *args):
1504 def revs(expr, *args):
1505 """Evaluate a revset.
1505 """Evaluate a revset.
1506
1506
1507 Emits revisions.
1507 Emits revisions.
1508 """
1508 """
1509
1509
1510 def set(expr, *args):
1510 def set(expr, *args):
1511 """Evaluate a revset.
1511 """Evaluate a revset.
1512
1512
1513 Emits changectx instances.
1513 Emits changectx instances.
1514 """
1514 """
1515
1515
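# Illustrative sketch, not part of the interface: evaluating revsets through a
# repository object conforming to ``ilocalrepositorymain``. The revset
# expressions are examples only.
def _example_revsets(repo):
    headrevs = list(repo.revs(b'head()'))     # revs() emits revision numbers
    for ctx in repo.set(b'limit(all(), 3)'):  # set() emits changectx instances
        print(ctx.rev())
    return headrevs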
1516 def anyrevs(specs, user=False, localalias=None):
1516 def anyrevs(specs, user=False, localalias=None):
1517 """Find revisions matching one of the given revsets."""
1517 """Find revisions matching one of the given revsets."""
1518
1518
1519 def url():
1519 def url():
1520 """Returns a string representing the location of this repo."""
1520 """Returns a string representing the location of this repo."""
1521
1521
1522 def hook(name, throw=False, **args):
1522 def hook(name, throw=False, **args):
1523 """Call a hook."""
1523 """Call a hook."""
1524
1524
1525 def tags():
1525 def tags():
1526 """Return a mapping of tag to node."""
1526 """Return a mapping of tag to node."""
1527
1527
1528 def tagtype(tagname):
1528 def tagtype(tagname):
1529 """Return the type of a given tag."""
1529 """Return the type of a given tag."""
1530
1530
1531 def tagslist():
1531 def tagslist():
1532 """Return a list of tags ordered by revision."""
1532 """Return a list of tags ordered by revision."""
1533
1533
1534 def nodetags(node):
1534 def nodetags(node):
1535 """Return the tags associated with a node."""
1535 """Return the tags associated with a node."""
1536
1536
1537 def nodebookmarks(node):
1537 def nodebookmarks(node):
1538 """Return the list of bookmarks pointing to the specified node."""
1538 """Return the list of bookmarks pointing to the specified node."""
1539
1539
1540 def branchmap():
1540 def branchmap():
1541 """Return a mapping of branch to heads in that branch."""
1541 """Return a mapping of branch to heads in that branch."""
1542
1542
1543 def revbranchcache():
1543 def revbranchcache():
1544 pass
1544 pass
1545
1545
1546 def branchtip(branchtip, ignoremissing=False):
1546 def branchtip(branchtip, ignoremissing=False):
1547 """Return the tip node for a given branch."""
1547 """Return the tip node for a given branch."""
1548
1548
1549 def lookup(key):
1549 def lookup(key):
1550 """Resolve the node for a revision."""
1550 """Resolve the node for a revision."""
1551
1551
1552 def lookupbranch(key):
1552 def lookupbranch(key):
1553 """Look up the branch name of the given revision or branch name."""
1553 """Look up the branch name of the given revision or branch name."""
1554
1554
1555 def known(nodes):
1555 def known(nodes):
1556 """Determine whether a series of nodes is known.
1556 """Determine whether a series of nodes is known.
1557
1557
1558 Returns a list of bools.
1558 Returns a list of bools.
1559 """
1559 """
1560
1560
1561 def local():
1561 def local():
1562 """Whether the repository is local."""
1562 """Whether the repository is local."""
1563 return True
1563 return True
1564
1564
1565 def publishing():
1565 def publishing():
1566 """Whether the repository is a publishing repository."""
1566 """Whether the repository is a publishing repository."""
1567
1567
1568 def cancopy():
1568 def cancopy():
1569 pass
1569 pass
1570
1570
1571 def shared():
1571 def shared():
1572 """The type of shared repository or None."""
1572 """The type of shared repository or None."""
1573
1573
1574 def wjoin(f, *insidef):
1574 def wjoin(f, *insidef):
1575 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1575 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1576
1576
1577 def setparents(p1, p2):
1577 def setparents(p1, p2):
1578 """Set the parent nodes of the working directory."""
1578 """Set the parent nodes of the working directory."""
1579
1579
1580 def filectx(path, changeid=None, fileid=None):
1580 def filectx(path, changeid=None, fileid=None):
1581 """Obtain a filectx for the given file revision."""
1581 """Obtain a filectx for the given file revision."""
1582
1582
1583 def getcwd():
1583 def getcwd():
1584 """Obtain the current working directory from the dirstate."""
1584 """Obtain the current working directory from the dirstate."""
1585
1585
1586 def pathto(f, cwd=None):
1586 def pathto(f, cwd=None):
1587 """Obtain the relative path to a file."""
1587 """Obtain the relative path to a file."""
1588
1588
1589 def adddatafilter(name, fltr):
1589 def adddatafilter(name, fltr):
1590 pass
1590 pass
1591
1591
1592 def wread(filename):
1592 def wread(filename):
1593 """Read a file from wvfs, using data filters."""
1593 """Read a file from wvfs, using data filters."""
1594
1594
1595 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1595 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1596 """Write data to a file in the wvfs, using data filters."""
1596 """Write data to a file in the wvfs, using data filters."""
1597
1597
1598 def wwritedata(filename, data):
1598 def wwritedata(filename, data):
1599 """Resolve data for writing to the wvfs, using data filters."""
1599 """Resolve data for writing to the wvfs, using data filters."""
1600
1600
1601 def currenttransaction():
1601 def currenttransaction():
1602 """Obtain the current transaction instance or None."""
1602 """Obtain the current transaction instance or None."""
1603
1603
1604 def transaction(desc, report=None):
1604 def transaction(desc, report=None):
1605 """Open a new transaction to write to the repository."""
1605 """Open a new transaction to write to the repository."""
1606
1606
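# Illustrative sketch, not part of the interface: a typical write sequence
# using the locking and transaction methods documented above. Treating locks
# as context managers and the tr.close()/tr.release() lifecycle reflect the
# stock implementation and are assumptions here.
def _example_transaction(repo):
    with repo.wlock(), repo.lock():       # non-store lock, then store lock
        tr = repo.transaction(b'example')
        try:
            # ... write to storage via the transaction ...
            tr.close()                    # commit the transaction
        finally:
            tr.release()                  # rolls back if close() never ran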
1607 def undofiles():
1607 def undofiles():
1608 """Returns a list of (vfs, path) for files to undo transactions."""
1608 """Returns a list of (vfs, path) for files to undo transactions."""
1609
1609
1610 def recover():
1610 def recover():
1611 """Roll back an interrupted transaction."""
1611 """Roll back an interrupted transaction."""
1612
1612
1613 def rollback(dryrun=False, force=False):
1613 def rollback(dryrun=False, force=False):
1614 """Undo the last transaction.
1614 """Undo the last transaction.
1615
1615
1616 DANGEROUS.
1616 DANGEROUS.
1617 """
1617 """
1618
1618
1619 def updatecaches(tr=None, full=False):
1619 def updatecaches(tr=None, full=False):
1620 """Warm repo caches."""
1620 """Warm repo caches."""
1621
1621
1622 def invalidatecaches():
1622 def invalidatecaches():
1623 """Invalidate cached data due to the repository mutating."""
1623 """Invalidate cached data due to the repository mutating."""
1624
1624
1625 def invalidatevolatilesets():
1625 def invalidatevolatilesets():
1626 pass
1626 pass
1627
1627
1628 def invalidatedirstate():
1628 def invalidatedirstate():
1629 """Invalidate the dirstate."""
1629 """Invalidate the dirstate."""
1630
1630
1631 def invalidate(clearfilecache=False):
1631 def invalidate(clearfilecache=False):
1632 pass
1632 pass
1633
1633
1634 def invalidateall():
1634 def invalidateall():
1635 pass
1635 pass
1636
1636
1637 def lock(wait=True):
1637 def lock(wait=True):
1638 """Lock the repository store and return a lock instance."""
1638 """Lock the repository store and return a lock instance."""
1639
1639
1640 def wlock(wait=True):
1640 def wlock(wait=True):
1641 """Lock the non-store parts of the repository."""
1641 """Lock the non-store parts of the repository."""
1642
1642
1643 def currentwlock():
1643 def currentwlock():
1644 """Return the wlock if it's held or None."""
1644 """Return the wlock if it's held or None."""
1645
1645
1646 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1646 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1647 pass
1647 pass
1648
1648
1649 def commit(text='', user=None, date=None, match=None, force=False,
1649 def commit(text='', user=None, date=None, match=None, force=False,
1650 editor=False, extra=None):
1650 editor=False, extra=None):
1651 """Add a new revision to the repository."""
1651 """Add a new revision to the repository."""
1652
1652
1653 def commitctx(ctx, error=False):
1653 def commitctx(ctx, error=False):
1654 """Commit a commitctx instance to the repository."""
1654 """Commit a commitctx instance to the repository."""
1655
1655
1656 def destroying():
1656 def destroying():
1657 """Inform the repository that nodes are about to be destroyed."""
1657 """Inform the repository that nodes are about to be destroyed."""
1658
1658
1659 def destroyed():
1659 def destroyed():
1660 """Inform the repository that nodes have been destroyed."""
1660 """Inform the repository that nodes have been destroyed."""
1661
1661
1662 def status(node1='.', node2=None, match=None, ignored=False,
1662 def status(node1='.', node2=None, match=None, ignored=False,
1663 clean=False, unknown=False, listsubrepos=False):
1663 clean=False, unknown=False, listsubrepos=False):
1664 """Convenience method to call repo[x].status()."""
1664 """Convenience method to call repo[x].status()."""
1665
1665
1666 def addpostdsstatus(ps):
1666 def addpostdsstatus(ps):
1667 pass
1667 pass
1668
1668
1669 def postdsstatus():
1669 def postdsstatus():
1670 pass
1670 pass
1671
1671
1672 def clearpostdsstatus():
1672 def clearpostdsstatus():
1673 pass
1673 pass
1674
1674
1675 def heads(start=None):
1675 def heads(start=None):
1676 """Obtain list of nodes that are DAG heads."""
1676 """Obtain list of nodes that are DAG heads."""
1677
1677
1678 def branchheads(branch=None, start=None, closed=False):
1678 def branchheads(branch=None, start=None, closed=False):
1679 pass
1679 pass
1680
1680
1681 def branches(nodes):
1681 def branches(nodes):
1682 pass
1682 pass
1683
1683
1684 def between(pairs):
1684 def between(pairs):
1685 pass
1685 pass
1686
1686
1687 def checkpush(pushop):
1687 def checkpush(pushop):
1688 pass
1688 pass
1689
1689
1690 prepushoutgoinghooks = interfaceutil.Attribute(
1690 prepushoutgoinghooks = interfaceutil.Attribute(
1691 """util.hooks instance.""")
1691 """util.hooks instance.""")
1692
1692
1693 def pushkey(namespace, key, old, new):
1693 def pushkey(namespace, key, old, new):
1694 pass
1694 pass
1695
1695
1696 def listkeys(namespace):
1696 def listkeys(namespace):
1697 pass
1697 pass
1698
1698
1699 def debugwireargs(one, two, three=None, four=None, five=None):
1699 def debugwireargs(one, two, three=None, four=None, five=None):
1700 pass
1700 pass
1701
1701
1702 def savecommitmessage(text):
1702 def savecommitmessage(text):
1703 pass
1703 pass
1704
1704
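# Illustrative usage sketch only -- not part of this module. It shows the
# conventional way callers take the two locks declared on
# ilocalrepositorymain: wlock() (working directory state) is acquired before
# lock() (store), and both are released in reverse order. Mercurial's lock
# objects support the context manager protocol, so ``with`` handles the
# release. ``_lockedcommit`` and its arguments are hypothetical names used
# purely for illustration.
def _lockedcommit(repo, text):
    with repo.wlock(), repo.lock():
        # Both locks are held here, so it is safe to mutate the store and
        # the working directory, e.g. by recording a new commit.
        return repo.commit(text=text)
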
class completelocalrepository(ilocalrepositorymain,
                              ilocalrepositoryfilestorage):
    """Complete interface for a local repository."""

class iwireprotocolcommandcacher(interfaceutil.Interface):
    """Represents a caching backend for wire protocol commands.

    Wire protocol version 2 supports transparent caching of many commands.
    To leverage this caching, servers can activate objects that cache
    command responses. Objects handle both cache writing and reading.
    This interface defines how that response caching mechanism works.

    Wire protocol version 2 commands emit a series of objects that are
    serialized and sent to the client. The caching layer exists between
    the invocation of the command function and the sending of its output
    objects to an output layer.

    Instances of this interface represent a binding to a cache that
    can serve a response (in place of calling a command function) and/or
    write responses to a cache for subsequent use.

    When a command request arrives, the following happens with regard
    to this interface:

    1. The server determines whether the command request is cacheable.
    2. If it is, an instance of this interface is spawned.
    3. The cacher is activated in a context manager (``__enter__`` is called).
    4. A cache *key* for that request is derived. This will call the
       instance's ``adjustcachekeystate()`` method so the derivation
       can be influenced.
    5. The cacher is informed of the derived cache key via a call to
       ``setcachekey()``.
    6. The cacher's ``lookup()`` method is called to test for presence of
       the derived key in the cache.
    7. If ``lookup()`` returns a hit, that cached result is used in place
       of invoking the command function. ``__exit__`` is called and the
       instance is discarded.
    8. The command function is invoked.
    9. ``onobject()`` is called for each object emitted by the command
       function.
    10. After the final object is seen, ``onfinished()`` is called.
    11. ``__exit__`` is called to signal the end of use of the instance.

    Cache *key* derivation can be influenced by the instance.

    Cache keys are initially derived from a deterministic representation of
    the command request. This includes the command name, arguments, protocol
    version, etc. This initial key derivation is performed by CBOR-encoding a
    data structure and feeding that output into a hasher.

    Instances of this interface can influence this initial key derivation
    via ``adjustcachekeystate()``.

    The instance is informed of the derived cache key via a call to
    ``setcachekey()``. The instance must store the key locally so it can
    be consulted on subsequent operations that may require it.

    When constructed, the instance has access to a callable that can be used
    for encoding response objects. This callable receives as its single
    argument an object emitted by a command function. It returns an iterable
    of bytes chunks representing the encoded object. Unless the cacher is
    caching native Python objects in memory or has a way of reconstructing
    the original Python objects, implementations typically call this function
    to produce bytes from the output objects and then store those bytes in
    the cache. When it comes time to re-emit those bytes, they are wrapped
    in a ``wireprototypes.encodedresponse`` instance to tell the output
    layer that they are pre-encoded.

    When receiving the objects emitted by the command function, instances
    can choose what to do with those objects. The simplest thing to do is
    re-emit the original objects. They will be forwarded to the output
    layer and will be processed as if the cacher did not exist.

    Implementations could also choose to not emit objects - instead locally
    buffering objects or their encoded representation. They could then emit
    a single "coalesced" object when ``onfinished()`` is called. In
    this way, the implementation would function as a filtering layer of
    sorts.

    When caching objects, typically the encoded form of the object will
    be stored. Keep in mind that if the original object is forwarded to
    the output layer, it will need to be encoded there as well. For large
    output, this redundant encoding could add overhead. Implementations
    could wrap the encoded object data in ``wireprototypes.encodedresponse``
    instances to avoid this overhead.
    """
    def __enter__():
        """Marks the instance as active.

        Should return self.
        """

    def __exit__(exctype, excvalue, exctb):
        """Called when cacher is no longer used.

        This can be used by implementations to perform cleanup actions (e.g.
        disconnecting network sockets, aborting a partially cached response).
        """

    def adjustcachekeystate(state):
        """Influences cache key derivation by adjusting state to derive key.

        A dict defining the state used to derive the cache key is passed.

        Implementations can modify this dict to record additional state they
        want to influence key derivation.

        Implementations are *highly* encouraged to not modify or delete
        existing keys.
        """

    def setcachekey(key):
        """Record the derived cache key for this request.

        Instances may mutate the key for internal usage, as desired; e.g.
        instances may wish to prepend the repo name, introduce path
        components for filesystem or URL addressing, etc. Behavior is up to
        the cache.

        Returns a bool indicating if the request is cacheable by this
        instance.
        """

    def lookup():
        """Attempt to resolve an entry in the cache.

        The instance is instructed to look for the cache key that it was
        informed about via the call to ``setcachekey()``.

        If there's no cache hit or the cacher doesn't wish to use the cached
        entry, ``None`` should be returned.

        Else, a dict defining the cached result should be returned. The
        dict may have the following keys:

        objs
           An iterable of objects that should be sent to the client. That
           iterable of objects is expected to be what the command function
           would return if invoked or an equivalent representation thereof.
        """

    def onobject(obj):
        """Called when a new object is emitted from the command function.

        Receives as its argument the object that was emitted from the
        command function.

        This method returns an iterator of objects to forward to the output
        layer. The easiest implementation is a generator that just
        ``yield obj``.
        """

    def onfinished():
        """Called after all objects have been emitted from the command function.

        Implementations should return an iterator of objects to forward to
        the output layer.

        This method can be a generator.
        """