##// END OF EJS Templates
interfaceutil: module to stub out zope.interface...
Gregory Szorc -
r37828:856f381a stable
parent child Browse files
Show More
@@ -0,0 +1,40 b''
1 # interfaceutil.py - Utilities for declaring interfaces.
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 # zope.interface imposes a run-time cost due to module import overhead and
9 # bookkeeping for declaring interfaces. So, we use stubs for various
10 # zope.interface primitives unless instructed otherwise.
11
12 from __future__ import absolute_import
13
14 from .. import (
15 encoding,
16 )
17
18 if encoding.environ.get('HGREALINTERFACES'):
19 from ..thirdparty.zope import (
20 interface as zi,
21 )
22
23 Attribute = zi.Attribute
24 Interface = zi.Interface
25 implementer = zi.implementer
26 else:
27 class Attribute(object):
28 def __init__(self, __name__, __doc__=''):
29 pass
30
31 class Interface(object):
32 def __init__(self, name, bases=(), attrs=None, __doc__=None,
33 __module__=None):
34 pass
35
36 def implementer(*ifaces):
37 def wrapper(cls):
38 return cls
39
40 return wrapper
@@ -1,267 +1,267 b''
1 # filelog.py - file history class for mercurial
1 # filelog.py - file history class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .thirdparty.zope import (
11 interface as zi,
12 )
13 from . import (
10 from . import (
14 error,
11 error,
15 repository,
12 repository,
16 revlog,
13 revlog,
17 )
14 )
15 from .utils import (
16 interfaceutil,
17 )
18
18
19 @zi.implementer(repository.ifilestorage)
19 @interfaceutil.implementer(repository.ifilestorage)
20 class filelog(object):
20 class filelog(object):
21 def __init__(self, opener, path):
21 def __init__(self, opener, path):
22 self._revlog = revlog.revlog(opener,
22 self._revlog = revlog.revlog(opener,
23 '/'.join(('data', path + '.i')),
23 '/'.join(('data', path + '.i')),
24 censorable=True)
24 censorable=True)
25 # full name of the user visible file, relative to the repository root
25 # full name of the user visible file, relative to the repository root
26 self.filename = path
26 self.filename = path
27 self.index = self._revlog.index
27 self.index = self._revlog.index
28 self.version = self._revlog.version
28 self.version = self._revlog.version
29 self.storedeltachains = self._revlog.storedeltachains
29 self.storedeltachains = self._revlog.storedeltachains
30 self._generaldelta = self._revlog._generaldelta
30 self._generaldelta = self._revlog._generaldelta
31
31
32 def __len__(self):
32 def __len__(self):
33 return len(self._revlog)
33 return len(self._revlog)
34
34
35 def __iter__(self):
35 def __iter__(self):
36 return self._revlog.__iter__()
36 return self._revlog.__iter__()
37
37
38 def revs(self, start=0, stop=None):
38 def revs(self, start=0, stop=None):
39 return self._revlog.revs(start=start, stop=stop)
39 return self._revlog.revs(start=start, stop=stop)
40
40
41 def parents(self, node):
41 def parents(self, node):
42 return self._revlog.parents(node)
42 return self._revlog.parents(node)
43
43
44 def parentrevs(self, rev):
44 def parentrevs(self, rev):
45 return self._revlog.parentrevs(rev)
45 return self._revlog.parentrevs(rev)
46
46
47 def rev(self, node):
47 def rev(self, node):
48 return self._revlog.rev(node)
48 return self._revlog.rev(node)
49
49
50 def node(self, rev):
50 def node(self, rev):
51 return self._revlog.node(rev)
51 return self._revlog.node(rev)
52
52
53 def lookup(self, node):
53 def lookup(self, node):
54 return self._revlog.lookup(node)
54 return self._revlog.lookup(node)
55
55
56 def linkrev(self, rev):
56 def linkrev(self, rev):
57 return self._revlog.linkrev(rev)
57 return self._revlog.linkrev(rev)
58
58
59 def flags(self, rev):
59 def flags(self, rev):
60 return self._revlog.flags(rev)
60 return self._revlog.flags(rev)
61
61
62 def commonancestorsheads(self, node1, node2):
62 def commonancestorsheads(self, node1, node2):
63 return self._revlog.commonancestorsheads(node1, node2)
63 return self._revlog.commonancestorsheads(node1, node2)
64
64
65 def descendants(self, revs):
65 def descendants(self, revs):
66 return self._revlog.descendants(revs)
66 return self._revlog.descendants(revs)
67
67
68 def headrevs(self):
68 def headrevs(self):
69 return self._revlog.headrevs()
69 return self._revlog.headrevs()
70
70
71 def heads(self, start=None, stop=None):
71 def heads(self, start=None, stop=None):
72 return self._revlog.heads(start, stop)
72 return self._revlog.heads(start, stop)
73
73
74 def children(self, node):
74 def children(self, node):
75 return self._revlog.children(node)
75 return self._revlog.children(node)
76
76
77 def deltaparent(self, rev):
77 def deltaparent(self, rev):
78 return self._revlog.deltaparent(rev)
78 return self._revlog.deltaparent(rev)
79
79
80 def candelta(self, baserev, rev):
80 def candelta(self, baserev, rev):
81 return self._revlog.candelta(baserev, rev)
81 return self._revlog.candelta(baserev, rev)
82
82
83 def iscensored(self, rev):
83 def iscensored(self, rev):
84 return self._revlog.iscensored(rev)
84 return self._revlog.iscensored(rev)
85
85
86 def rawsize(self, rev):
86 def rawsize(self, rev):
87 return self._revlog.rawsize(rev)
87 return self._revlog.rawsize(rev)
88
88
89 def checkhash(self, text, node, p1=None, p2=None, rev=None):
89 def checkhash(self, text, node, p1=None, p2=None, rev=None):
90 return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)
90 return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)
91
91
92 def revision(self, node, _df=None, raw=False):
92 def revision(self, node, _df=None, raw=False):
93 return self._revlog.revision(node, _df=_df, raw=raw)
93 return self._revlog.revision(node, _df=_df, raw=raw)
94
94
95 def revdiff(self, rev1, rev2):
95 def revdiff(self, rev1, rev2):
96 return self._revlog.revdiff(rev1, rev2)
96 return self._revlog.revdiff(rev1, rev2)
97
97
98 def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
98 def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
99 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
99 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
100 cachedelta=None):
100 cachedelta=None):
101 return self._revlog.addrevision(revisiondata, transaction, linkrev,
101 return self._revlog.addrevision(revisiondata, transaction, linkrev,
102 p1, p2, node=node, flags=flags,
102 p1, p2, node=node, flags=flags,
103 cachedelta=cachedelta)
103 cachedelta=cachedelta)
104
104
105 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
105 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
106 return self._revlog.addgroup(deltas, linkmapper, transaction,
106 return self._revlog.addgroup(deltas, linkmapper, transaction,
107 addrevisioncb=addrevisioncb)
107 addrevisioncb=addrevisioncb)
108
108
109 def getstrippoint(self, minlink):
109 def getstrippoint(self, minlink):
110 return self._revlog.getstrippoint(minlink)
110 return self._revlog.getstrippoint(minlink)
111
111
112 def strip(self, minlink, transaction):
112 def strip(self, minlink, transaction):
113 return self._revlog.strip(minlink, transaction)
113 return self._revlog.strip(minlink, transaction)
114
114
115 def files(self):
115 def files(self):
116 return self._revlog.files()
116 return self._revlog.files()
117
117
118 def checksize(self):
118 def checksize(self):
119 return self._revlog.checksize()
119 return self._revlog.checksize()
120
120
121 def read(self, node):
121 def read(self, node):
122 t = self.revision(node)
122 t = self.revision(node)
123 if not t.startswith('\1\n'):
123 if not t.startswith('\1\n'):
124 return t
124 return t
125 s = t.index('\1\n', 2)
125 s = t.index('\1\n', 2)
126 return t[s + 2:]
126 return t[s + 2:]
127
127
128 def add(self, text, meta, transaction, link, p1=None, p2=None):
128 def add(self, text, meta, transaction, link, p1=None, p2=None):
129 if meta or text.startswith('\1\n'):
129 if meta or text.startswith('\1\n'):
130 text = revlog.packmeta(meta, text)
130 text = revlog.packmeta(meta, text)
131 return self.addrevision(text, transaction, link, p1, p2)
131 return self.addrevision(text, transaction, link, p1, p2)
132
132
133 def renamed(self, node):
133 def renamed(self, node):
134 if self.parents(node)[0] != revlog.nullid:
134 if self.parents(node)[0] != revlog.nullid:
135 return False
135 return False
136 t = self.revision(node)
136 t = self.revision(node)
137 m = revlog.parsemeta(t)[0]
137 m = revlog.parsemeta(t)[0]
138 if m and "copy" in m:
138 if m and "copy" in m:
139 return (m["copy"], revlog.bin(m["copyrev"]))
139 return (m["copy"], revlog.bin(m["copyrev"]))
140 return False
140 return False
141
141
142 def size(self, rev):
142 def size(self, rev):
143 """return the size of a given revision"""
143 """return the size of a given revision"""
144
144
145 # for revisions with renames, we have to go the slow way
145 # for revisions with renames, we have to go the slow way
146 node = self.node(rev)
146 node = self.node(rev)
147 if self.renamed(node):
147 if self.renamed(node):
148 return len(self.read(node))
148 return len(self.read(node))
149 if self.iscensored(rev):
149 if self.iscensored(rev):
150 return 0
150 return 0
151
151
152 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
152 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
153 return self._revlog.size(rev)
153 return self._revlog.size(rev)
154
154
155 def cmp(self, node, text):
155 def cmp(self, node, text):
156 """compare text with a given file revision
156 """compare text with a given file revision
157
157
158 returns True if text is different than what is stored.
158 returns True if text is different than what is stored.
159 """
159 """
160
160
161 t = text
161 t = text
162 if text.startswith('\1\n'):
162 if text.startswith('\1\n'):
163 t = '\1\n\1\n' + text
163 t = '\1\n\1\n' + text
164
164
165 samehashes = not self._revlog.cmp(node, t)
165 samehashes = not self._revlog.cmp(node, t)
166 if samehashes:
166 if samehashes:
167 return False
167 return False
168
168
169 # censored files compare against the empty file
169 # censored files compare against the empty file
170 if self.iscensored(self.rev(node)):
170 if self.iscensored(self.rev(node)):
171 return text != ''
171 return text != ''
172
172
173 # renaming a file produces a different hash, even if the data
173 # renaming a file produces a different hash, even if the data
174 # remains unchanged. Check if it's the case (slow):
174 # remains unchanged. Check if it's the case (slow):
175 if self.renamed(node):
175 if self.renamed(node):
176 t2 = self.read(node)
176 t2 = self.read(node)
177 return t2 != text
177 return t2 != text
178
178
179 return True
179 return True
180
180
181 @property
181 @property
182 def filename(self):
182 def filename(self):
183 return self._revlog.filename
183 return self._revlog.filename
184
184
185 @filename.setter
185 @filename.setter
186 def filename(self, value):
186 def filename(self, value):
187 self._revlog.filename = value
187 self._revlog.filename = value
188
188
189 # TODO these aren't part of the interface and aren't internal methods.
189 # TODO these aren't part of the interface and aren't internal methods.
190 # Callers should be fixed to not use them.
190 # Callers should be fixed to not use them.
191 @property
191 @property
192 def indexfile(self):
192 def indexfile(self):
193 return self._revlog.indexfile
193 return self._revlog.indexfile
194
194
195 @indexfile.setter
195 @indexfile.setter
196 def indexfile(self, value):
196 def indexfile(self, value):
197 self._revlog.indexfile = value
197 self._revlog.indexfile = value
198
198
199 @property
199 @property
200 def datafile(self):
200 def datafile(self):
201 return self._revlog.datafile
201 return self._revlog.datafile
202
202
203 @property
203 @property
204 def opener(self):
204 def opener(self):
205 return self._revlog.opener
205 return self._revlog.opener
206
206
207 @property
207 @property
208 def _lazydeltabase(self):
208 def _lazydeltabase(self):
209 return self._revlog._lazydeltabase
209 return self._revlog._lazydeltabase
210
210
211 @_lazydeltabase.setter
211 @_lazydeltabase.setter
212 def _lazydeltabase(self, value):
212 def _lazydeltabase(self, value):
213 self._revlog._lazydeltabase = value
213 self._revlog._lazydeltabase = value
214
214
215 @property
215 @property
216 def _aggressivemergedeltas(self):
216 def _aggressivemergedeltas(self):
217 return self._revlog._aggressivemergedeltas
217 return self._revlog._aggressivemergedeltas
218
218
219 @_aggressivemergedeltas.setter
219 @_aggressivemergedeltas.setter
220 def _aggressivemergedeltas(self, value):
220 def _aggressivemergedeltas(self, value):
221 self._revlog._aggressivemergedeltas = value
221 self._revlog._aggressivemergedeltas = value
222
222
223 @property
223 @property
224 def _inline(self):
224 def _inline(self):
225 return self._revlog._inline
225 return self._revlog._inline
226
226
227 @property
227 @property
228 def _withsparseread(self):
228 def _withsparseread(self):
229 return getattr(self._revlog, '_withsparseread', False)
229 return getattr(self._revlog, '_withsparseread', False)
230
230
231 @property
231 @property
232 def _srmingapsize(self):
232 def _srmingapsize(self):
233 return self._revlog._srmingapsize
233 return self._revlog._srmingapsize
234
234
235 @property
235 @property
236 def _srdensitythreshold(self):
236 def _srdensitythreshold(self):
237 return self._revlog._srdensitythreshold
237 return self._revlog._srdensitythreshold
238
238
239 def _deltachain(self, rev, stoprev=None):
239 def _deltachain(self, rev, stoprev=None):
240 return self._revlog._deltachain(rev, stoprev)
240 return self._revlog._deltachain(rev, stoprev)
241
241
242 def chainbase(self, rev):
242 def chainbase(self, rev):
243 return self._revlog.chainbase(rev)
243 return self._revlog.chainbase(rev)
244
244
245 def chainlen(self, rev):
245 def chainlen(self, rev):
246 return self._revlog.chainlen(rev)
246 return self._revlog.chainlen(rev)
247
247
248 def clone(self, tr, destrevlog, **kwargs):
248 def clone(self, tr, destrevlog, **kwargs):
249 if not isinstance(destrevlog, filelog):
249 if not isinstance(destrevlog, filelog):
250 raise error.ProgrammingError('expected filelog to clone()')
250 raise error.ProgrammingError('expected filelog to clone()')
251
251
252 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
252 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
253
253
254 def start(self, rev):
254 def start(self, rev):
255 return self._revlog.start(rev)
255 return self._revlog.start(rev)
256
256
257 def end(self, rev):
257 def end(self, rev):
258 return self._revlog.end(rev)
258 return self._revlog.end(rev)
259
259
260 def length(self, rev):
260 def length(self, rev):
261 return self._revlog.length(rev)
261 return self._revlog.length(rev)
262
262
263 def compress(self, data):
263 def compress(self, data):
264 return self._revlog.compress(data)
264 return self._revlog.compress(data)
265
265
266 def _addrevision(self, *args, **kwargs):
266 def _addrevision(self, *args, **kwargs):
267 return self._revlog._addrevision(*args, **kwargs)
267 return self._revlog._addrevision(*args, **kwargs)
@@ -1,960 +1,961 b''
1 # httppeer.py - HTTP repository proxy classes for mercurial
1 # httppeer.py - HTTP repository proxy classes for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import io
12 import io
13 import os
13 import os
14 import socket
14 import socket
15 import struct
15 import struct
16 import tempfile
16 import tempfile
17 import weakref
17 import weakref
18
18
19 from .i18n import _
19 from .i18n import _
20 from .thirdparty import (
20 from .thirdparty import (
21 cbor,
21 cbor,
22 )
22 )
23 from .thirdparty.zope import (
24 interface as zi,
25 )
26 from . import (
23 from . import (
27 bundle2,
24 bundle2,
28 error,
25 error,
29 httpconnection,
26 httpconnection,
30 pycompat,
27 pycompat,
31 repository,
28 repository,
32 statichttprepo,
29 statichttprepo,
33 url as urlmod,
30 url as urlmod,
34 util,
31 util,
35 wireprotoframing,
32 wireprotoframing,
36 wireprototypes,
33 wireprototypes,
37 wireprotov1peer,
34 wireprotov1peer,
38 wireprotov2peer,
35 wireprotov2peer,
39 wireprotov2server,
36 wireprotov2server,
40 )
37 )
38 from .utils import (
39 interfaceutil,
40 )
41
41
42 httplib = util.httplib
42 httplib = util.httplib
43 urlerr = util.urlerr
43 urlerr = util.urlerr
44 urlreq = util.urlreq
44 urlreq = util.urlreq
45
45
46 def encodevalueinheaders(value, header, limit):
46 def encodevalueinheaders(value, header, limit):
47 """Encode a string value into multiple HTTP headers.
47 """Encode a string value into multiple HTTP headers.
48
48
49 ``value`` will be encoded into 1 or more HTTP headers with the names
49 ``value`` will be encoded into 1 or more HTTP headers with the names
50 ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
50 ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
51 name + value will be at most ``limit`` bytes long.
51 name + value will be at most ``limit`` bytes long.
52
52
53 Returns an iterable of 2-tuples consisting of header names and
53 Returns an iterable of 2-tuples consisting of header names and
54 values as native strings.
54 values as native strings.
55 """
55 """
56 # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
56 # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
57 # not bytes. This function always takes bytes in as arguments.
57 # not bytes. This function always takes bytes in as arguments.
58 fmt = pycompat.strurl(header) + r'-%s'
58 fmt = pycompat.strurl(header) + r'-%s'
59 # Note: it is *NOT* a bug that the last bit here is a bytestring
59 # Note: it is *NOT* a bug that the last bit here is a bytestring
60 # and not a unicode: we're just getting the encoded length anyway,
60 # and not a unicode: we're just getting the encoded length anyway,
61 # and using an r-string to make it portable between Python 2 and 3
61 # and using an r-string to make it portable between Python 2 and 3
62 # doesn't work because then the \r is a literal backslash-r
62 # doesn't work because then the \r is a literal backslash-r
63 # instead of a carriage return.
63 # instead of a carriage return.
64 valuelen = limit - len(fmt % r'000') - len(': \r\n')
64 valuelen = limit - len(fmt % r'000') - len(': \r\n')
65 result = []
65 result = []
66
66
67 n = 0
67 n = 0
68 for i in xrange(0, len(value), valuelen):
68 for i in xrange(0, len(value), valuelen):
69 n += 1
69 n += 1
70 result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
70 result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
71
71
72 return result
72 return result
73
73
74 def _wraphttpresponse(resp):
74 def _wraphttpresponse(resp):
75 """Wrap an HTTPResponse with common error handlers.
75 """Wrap an HTTPResponse with common error handlers.
76
76
77 This ensures that any I/O from any consumer raises the appropriate
77 This ensures that any I/O from any consumer raises the appropriate
78 error and messaging.
78 error and messaging.
79 """
79 """
80 origread = resp.read
80 origread = resp.read
81
81
82 class readerproxy(resp.__class__):
82 class readerproxy(resp.__class__):
83 def read(self, size=None):
83 def read(self, size=None):
84 try:
84 try:
85 return origread(size)
85 return origread(size)
86 except httplib.IncompleteRead as e:
86 except httplib.IncompleteRead as e:
87 # e.expected is an integer if length known or None otherwise.
87 # e.expected is an integer if length known or None otherwise.
88 if e.expected:
88 if e.expected:
89 msg = _('HTTP request error (incomplete response; '
89 msg = _('HTTP request error (incomplete response; '
90 'expected %d bytes got %d)') % (e.expected,
90 'expected %d bytes got %d)') % (e.expected,
91 len(e.partial))
91 len(e.partial))
92 else:
92 else:
93 msg = _('HTTP request error (incomplete response)')
93 msg = _('HTTP request error (incomplete response)')
94
94
95 raise error.PeerTransportError(
95 raise error.PeerTransportError(
96 msg,
96 msg,
97 hint=_('this may be an intermittent network failure; '
97 hint=_('this may be an intermittent network failure; '
98 'if the error persists, consider contacting the '
98 'if the error persists, consider contacting the '
99 'network or server operator'))
99 'network or server operator'))
100 except httplib.HTTPException as e:
100 except httplib.HTTPException as e:
101 raise error.PeerTransportError(
101 raise error.PeerTransportError(
102 _('HTTP request error (%s)') % e,
102 _('HTTP request error (%s)') % e,
103 hint=_('this may be an intermittent network failure; '
103 hint=_('this may be an intermittent network failure; '
104 'if the error persists, consider contacting the '
104 'if the error persists, consider contacting the '
105 'network or server operator'))
105 'network or server operator'))
106
106
107 resp.__class__ = readerproxy
107 resp.__class__ = readerproxy
108
108
109 class _multifile(object):
109 class _multifile(object):
110 def __init__(self, *fileobjs):
110 def __init__(self, *fileobjs):
111 for f in fileobjs:
111 for f in fileobjs:
112 if not util.safehasattr(f, 'length'):
112 if not util.safehasattr(f, 'length'):
113 raise ValueError(
113 raise ValueError(
114 '_multifile only supports file objects that '
114 '_multifile only supports file objects that '
115 'have a length but this one does not:', type(f), f)
115 'have a length but this one does not:', type(f), f)
116 self._fileobjs = fileobjs
116 self._fileobjs = fileobjs
117 self._index = 0
117 self._index = 0
118
118
119 @property
119 @property
120 def length(self):
120 def length(self):
121 return sum(f.length for f in self._fileobjs)
121 return sum(f.length for f in self._fileobjs)
122
122
123 def read(self, amt=None):
123 def read(self, amt=None):
124 if amt <= 0:
124 if amt <= 0:
125 return ''.join(f.read() for f in self._fileobjs)
125 return ''.join(f.read() for f in self._fileobjs)
126 parts = []
126 parts = []
127 while amt and self._index < len(self._fileobjs):
127 while amt and self._index < len(self._fileobjs):
128 parts.append(self._fileobjs[self._index].read(amt))
128 parts.append(self._fileobjs[self._index].read(amt))
129 got = len(parts[-1])
129 got = len(parts[-1])
130 if got < amt:
130 if got < amt:
131 self._index += 1
131 self._index += 1
132 amt -= got
132 amt -= got
133 return ''.join(parts)
133 return ''.join(parts)
134
134
135 def seek(self, offset, whence=os.SEEK_SET):
135 def seek(self, offset, whence=os.SEEK_SET):
136 if whence != os.SEEK_SET:
136 if whence != os.SEEK_SET:
137 raise NotImplementedError(
137 raise NotImplementedError(
138 '_multifile does not support anything other'
138 '_multifile does not support anything other'
139 ' than os.SEEK_SET for whence on seek()')
139 ' than os.SEEK_SET for whence on seek()')
140 if offset != 0:
140 if offset != 0:
141 raise NotImplementedError(
141 raise NotImplementedError(
142 '_multifile only supports seeking to start, but that '
142 '_multifile only supports seeking to start, but that '
143 'could be fixed if you need it')
143 'could be fixed if you need it')
144 for f in self._fileobjs:
144 for f in self._fileobjs:
145 f.seek(0)
145 f.seek(0)
146 self._index = 0
146 self._index = 0
147
147
148 def makev1commandrequest(ui, requestbuilder, caps, capablefn,
148 def makev1commandrequest(ui, requestbuilder, caps, capablefn,
149 repobaseurl, cmd, args):
149 repobaseurl, cmd, args):
150 """Make an HTTP request to run a command for a version 1 client.
150 """Make an HTTP request to run a command for a version 1 client.
151
151
152 ``caps`` is a set of known server capabilities. The value may be
152 ``caps`` is a set of known server capabilities. The value may be
153 None if capabilities are not yet known.
153 None if capabilities are not yet known.
154
154
155 ``capablefn`` is a function to evaluate a capability.
155 ``capablefn`` is a function to evaluate a capability.
156
156
157 ``cmd``, ``args``, and ``data`` define the command, its arguments, and
157 ``cmd``, ``args``, and ``data`` define the command, its arguments, and
158 raw data to pass to it.
158 raw data to pass to it.
159 """
159 """
160 if cmd == 'pushkey':
160 if cmd == 'pushkey':
161 args['data'] = ''
161 args['data'] = ''
162 data = args.pop('data', None)
162 data = args.pop('data', None)
163 headers = args.pop('headers', {})
163 headers = args.pop('headers', {})
164
164
165 ui.debug("sending %s command\n" % cmd)
165 ui.debug("sending %s command\n" % cmd)
166 q = [('cmd', cmd)]
166 q = [('cmd', cmd)]
167 headersize = 0
167 headersize = 0
168 # Important: don't use self.capable() here or else you end up
168 # Important: don't use self.capable() here or else you end up
169 # with infinite recursion when trying to look up capabilities
169 # with infinite recursion when trying to look up capabilities
170 # for the first time.
170 # for the first time.
171 postargsok = caps is not None and 'httppostargs' in caps
171 postargsok = caps is not None and 'httppostargs' in caps
172
172
173 # Send arguments via POST.
173 # Send arguments via POST.
174 if postargsok and args:
174 if postargsok and args:
175 strargs = urlreq.urlencode(sorted(args.items()))
175 strargs = urlreq.urlencode(sorted(args.items()))
176 if not data:
176 if not data:
177 data = strargs
177 data = strargs
178 else:
178 else:
179 if isinstance(data, bytes):
179 if isinstance(data, bytes):
180 i = io.BytesIO(data)
180 i = io.BytesIO(data)
181 i.length = len(data)
181 i.length = len(data)
182 data = i
182 data = i
183 argsio = io.BytesIO(strargs)
183 argsio = io.BytesIO(strargs)
184 argsio.length = len(strargs)
184 argsio.length = len(strargs)
185 data = _multifile(argsio, data)
185 data = _multifile(argsio, data)
186 headers[r'X-HgArgs-Post'] = len(strargs)
186 headers[r'X-HgArgs-Post'] = len(strargs)
187 elif args:
187 elif args:
188 # Calling self.capable() can infinite loop if we are calling
188 # Calling self.capable() can infinite loop if we are calling
189 # "capabilities". But that command should never accept wire
189 # "capabilities". But that command should never accept wire
190 # protocol arguments. So this should never happen.
190 # protocol arguments. So this should never happen.
191 assert cmd != 'capabilities'
191 assert cmd != 'capabilities'
192 httpheader = capablefn('httpheader')
192 httpheader = capablefn('httpheader')
193 if httpheader:
193 if httpheader:
194 headersize = int(httpheader.split(',', 1)[0])
194 headersize = int(httpheader.split(',', 1)[0])
195
195
196 # Send arguments via HTTP headers.
196 # Send arguments via HTTP headers.
197 if headersize > 0:
197 if headersize > 0:
198 # The headers can typically carry more data than the URL.
198 # The headers can typically carry more data than the URL.
199 encargs = urlreq.urlencode(sorted(args.items()))
199 encargs = urlreq.urlencode(sorted(args.items()))
200 for header, value in encodevalueinheaders(encargs, 'X-HgArg',
200 for header, value in encodevalueinheaders(encargs, 'X-HgArg',
201 headersize):
201 headersize):
202 headers[header] = value
202 headers[header] = value
203 # Send arguments via query string (Mercurial <1.9).
203 # Send arguments via query string (Mercurial <1.9).
204 else:
204 else:
205 q += sorted(args.items())
205 q += sorted(args.items())
206
206
207 qs = '?%s' % urlreq.urlencode(q)
207 qs = '?%s' % urlreq.urlencode(q)
208 cu = "%s%s" % (repobaseurl, qs)
208 cu = "%s%s" % (repobaseurl, qs)
209 size = 0
209 size = 0
210 if util.safehasattr(data, 'length'):
210 if util.safehasattr(data, 'length'):
211 size = data.length
211 size = data.length
212 elif data is not None:
212 elif data is not None:
213 size = len(data)
213 size = len(data)
214 if data is not None and r'Content-Type' not in headers:
214 if data is not None and r'Content-Type' not in headers:
215 headers[r'Content-Type'] = r'application/mercurial-0.1'
215 headers[r'Content-Type'] = r'application/mercurial-0.1'
216
216
217 # Tell the server we accept application/mercurial-0.2 and multiple
217 # Tell the server we accept application/mercurial-0.2 and multiple
218 # compression formats if the server is capable of emitting those
218 # compression formats if the server is capable of emitting those
219 # payloads.
219 # payloads.
220 # Note: Keep this set empty by default, as client advertisement of
220 # Note: Keep this set empty by default, as client advertisement of
221 # protocol parameters should only occur after the handshake.
221 # protocol parameters should only occur after the handshake.
222 protoparams = set()
222 protoparams = set()
223
223
224 mediatypes = set()
224 mediatypes = set()
225 if caps is not None:
225 if caps is not None:
226 mt = capablefn('httpmediatype')
226 mt = capablefn('httpmediatype')
227 if mt:
227 if mt:
228 protoparams.add('0.1')
228 protoparams.add('0.1')
229 mediatypes = set(mt.split(','))
229 mediatypes = set(mt.split(','))
230
230
231 protoparams.add('partial-pull')
231 protoparams.add('partial-pull')
232
232
233 if '0.2tx' in mediatypes:
233 if '0.2tx' in mediatypes:
234 protoparams.add('0.2')
234 protoparams.add('0.2')
235
235
236 if '0.2tx' in mediatypes and capablefn('compression'):
236 if '0.2tx' in mediatypes and capablefn('compression'):
237 # We /could/ compare supported compression formats and prune
237 # We /could/ compare supported compression formats and prune
238 # non-mutually supported or error if nothing is mutually supported.
238 # non-mutually supported or error if nothing is mutually supported.
239 # For now, send the full list to the server and have it error.
239 # For now, send the full list to the server and have it error.
240 comps = [e.wireprotosupport().name for e in
240 comps = [e.wireprotosupport().name for e in
241 util.compengines.supportedwireengines(util.CLIENTROLE)]
241 util.compengines.supportedwireengines(util.CLIENTROLE)]
242 protoparams.add('comp=%s' % ','.join(comps))
242 protoparams.add('comp=%s' % ','.join(comps))
243
243
244 if protoparams:
244 if protoparams:
245 protoheaders = encodevalueinheaders(' '.join(sorted(protoparams)),
245 protoheaders = encodevalueinheaders(' '.join(sorted(protoparams)),
246 'X-HgProto',
246 'X-HgProto',
247 headersize or 1024)
247 headersize or 1024)
248 for header, value in protoheaders:
248 for header, value in protoheaders:
249 headers[header] = value
249 headers[header] = value
250
250
251 varyheaders = []
251 varyheaders = []
252 for header in headers:
252 for header in headers:
253 if header.lower().startswith(r'x-hg'):
253 if header.lower().startswith(r'x-hg'):
254 varyheaders.append(header)
254 varyheaders.append(header)
255
255
256 if varyheaders:
256 if varyheaders:
257 headers[r'Vary'] = r','.join(sorted(varyheaders))
257 headers[r'Vary'] = r','.join(sorted(varyheaders))
258
258
259 req = requestbuilder(pycompat.strurl(cu), data, headers)
259 req = requestbuilder(pycompat.strurl(cu), data, headers)
260
260
261 if data is not None:
261 if data is not None:
262 ui.debug("sending %d bytes\n" % size)
262 ui.debug("sending %d bytes\n" % size)
263 req.add_unredirected_header(r'Content-Length', r'%d' % size)
263 req.add_unredirected_header(r'Content-Length', r'%d' % size)
264
264
265 return req, cu, qs
265 return req, cu, qs
266
266
def _reqdata(req):
    """Get request data, if any. If no data, returns None."""
    if pycompat.ispy3:
        # Python 3 urllib request objects expose the payload directly.
        return req.data
    # Python 2: probe before fetching; has_data()/get_data() only exist there.
    if not req.has_data():
        return None
    return req.get_data()
274
274
def sendrequest(ui, opener, req):
    """Send a prepared HTTP request.

    Returns the response object.

    Raises ``error.Abort`` on HTTP 401, and ``IOError`` on lower-level
    HTTP protocol failures.
    """
    # Evaluate the debug condition once so setup and the ``finally``
    # clause agree. The previous code tested only the config knob when
    # logging completion, which raised NameError on ``dbg``/``line``/
    # ``start`` whenever ``devel.debug.peer-request`` was set without
    # --debug, and on ``res`` when the request itself failed.
    debugenabled = (ui.debugflag
                    and ui.configbool('devel', 'debug.peer-request'))

    if debugenabled:
        dbg = ui.debug
        line = 'devel-peer-request: %s\n'
        dbg(line % '%s %s' % (pycompat.bytesurl(req.get_method()),
                              pycompat.bytesurl(req.get_full_url())))
        hgargssize = None

        for header, value in sorted(req.header_items()):
            header = pycompat.bytesurl(header)
            value = pycompat.bytesurl(value)
            if header.startswith('X-hgarg-'):
                # Command arguments are spread over several headers;
                # report only their combined size, not their content.
                if hgargssize is None:
                    hgargssize = 0
                hgargssize += len(value)
            else:
                dbg(line % '  %s %s' % (header, value))

        if hgargssize is not None:
            dbg(line % '  %d bytes of commands arguments in headers'
                % hgargssize)
        data = _reqdata(req)
        if data is not None:
            length = getattr(data, 'length', None)
            if length is None:
                length = len(data)
            dbg(line % '  %d bytes of data' % length)

        start = util.timer()

    res = None
    try:
        res = opener.open(req)
    except urlerr.httperror as inst:
        if inst.code == 401:
            raise error.Abort(_('authorization failed'))
        raise
    except httplib.HTTPException as inst:
        ui.debug('http error requesting %s\n' %
                 util.hidepassword(req.get_full_url()))
        ui.traceback()
        raise IOError(None, inst)
    finally:
        if debugenabled:
            # ``res`` is still None when opener.open() raised.
            code = res.code if res is not None else -1
            dbg(line % '  finished in %.4f seconds (%d)'
                % (util.timer() - start, code))

    # Insert error handlers for common I/O failures.
    _wraphttpresponse(res)

    return res
330
330
def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible,
                           allowcbor=False):
    """Validate and unwrap a version 1 wire protocol command response.

    Returns a ``(respurl, proto, resp)`` tuple, where ``respurl`` is the
    (possibly redirected) base URL, ``proto`` the media type sent by the
    server, and ``resp`` the response object, wrapped in a decompressing
    reader when the payload is compressed.
    """
    # record the url we got redirected to
    respurl = pycompat.bytesurl(resp.geturl())
    if respurl.endswith(qs):
        respurl = respurl[:-len(qs)]
    if baseurl.rstrip('/') != respurl.rstrip('/'):
        if not ui.quiet:
            ui.warn(_('real URL is %s\n') % respurl)

    try:
        proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))
    except AttributeError:
        # Some response objects lack getheader(); fall back to the
        # headers mapping.
        proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))

    safeurl = util.hidepassword(baseurl)
    if proto.startswith('application/hg-error'):
        raise error.OutOfBandError(resp.read())

    # Pre 1.0 versions of Mercurial used text/plain and
    # application/hg-changegroup. We don't support such old servers.
    if not proto.startswith('application/mercurial-'):
        ui.debug("requested URL: '%s'\n" % util.hidepassword(requrl))
        raise error.RepoError(
            _("'%s' does not appear to be an hg repository:\n"
              "---%%<--- (%s)\n%s\n---%%<---\n")
            % (safeurl, proto or 'no content-type', resp.read(1024)))

    try:
        subtype = proto.split('-', 1)[1]

        # Unless we end up supporting CBOR in the legacy wire protocol,
        # this should ONLY be encountered for the initial capabilities
        # request during handshake.
        if subtype == 'cbor':
            if allowcbor:
                return respurl, proto, resp
            else:
                raise error.RepoError(_('unexpected CBOR response from '
                                        'server'))

        version_info = tuple([int(n) for n in subtype.split('.')])
    except ValueError:
        raise error.RepoError(_("'%s' sent a broken Content-Type "
                                "header (%s)") % (safeurl, proto))

    # TODO consider switching to a decompression reader that uses
    # generators.
    if version_info == (0, 1):
        if compressible:
            resp = util.compengines['zlib'].decompressorreader(resp)

    elif version_info == (0, 2):
        # application/mercurial-0.2 always identifies the compression
        # engine in the payload header.
        elen = struct.unpack('B', resp.read(1))[0]
        ename = resp.read(elen)
        engine = util.compengines.forwiretype(ename)

        resp = engine.decompressorreader(resp)
    else:
        raise error.RepoError(_("'%s' uses newer protocol %s") %
                              (safeurl, subtype))

    return respurl, proto, resp
396
396
class httppeer(wireprotov1peer.wirepeer):
    """Peer speaking version 1 of the HTTP wire protocol."""

    def __init__(self, ui, path, url, opener, requestbuilder, caps):
        self.ui = ui
        self._path = path
        self._url = url
        self._caps = caps
        self._urlopener = opener
        self._requestbuilder = requestbuilder

    def __del__(self):
        # Release keepalive connections held by the opener's handlers.
        for h in self._urlopener.handlers:
            h.close()
            getattr(h, "close_all", lambda: None)()

    # Begin of ipeerconnection interface.

    def url(self):
        return self._path

    def local(self):
        return None

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        pass

    # End of ipeerconnection interface.

    # Begin of ipeercommands interface.

    def capabilities(self):
        return self._caps

    # End of ipeercommands interface.

    # look up capabilities only when needed

    def _callstream(self, cmd, _compressible=False, **args):
        args = pycompat.byteskwargs(args)

        req, cu, qs = makev1commandrequest(self.ui, self._requestbuilder,
                                           self._caps, self.capable,
                                           self._url, cmd, args)

        resp = sendrequest(self.ui, self._urlopener, req)

        # parsev1commandresponse() records the possibly-redirected URL so
        # subsequent requests go straight to the real location.
        self._url, ct, resp = parsev1commandresponse(self.ui, self._url, cu, qs,
                                                     resp, _compressible)

        return resp

    def _call(self, cmd, **args):
        fp = self._callstream(cmd, **args)
        try:
            return fp.read()
        finally:
            # if using keepalive, allow connection to be reused
            fp.close()

    def _callpush(self, cmd, cg, **args):
        # have to stream bundle to a temp file because we do not have
        # http 1.1 chunked transfer.

        types = self.capable('unbundle')
        try:
            types = types.split(',')
        except AttributeError:
            # servers older than d1b16a746db6 will send 'unbundle' as a
            # boolean capability. They only support headerless/uncompressed
            # bundles.
            types = [""]
        for x in types:
            if x in bundle2.bundletypes:
                type = x
                break

        tempname = bundle2.writebundle(self.ui, cg, None, type)
        fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
        headers = {r'Content-Type': r'application/mercurial-0.1'}

        try:
            r = self._call(cmd, data=fp, headers=headers, **args)
            vals = r.split('\n', 1)
            if len(vals) < 2:
                raise error.ResponseError(_("unexpected response:"), r)
            return vals
        except urlerr.httperror:
            # Catch and re-raise these so we don't try and treat them
            # like generic socket errors. They lack any values in
            # .args on Python 3 which breaks our socket.error block.
            raise
        except socket.error as err:
            if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
                raise error.Abort(_('push failed: %s') % err.args[1])
            raise error.Abort(err.args[1])
        finally:
            fp.close()
            os.unlink(tempname)

    def _calltwowaystream(self, cmd, fp, **args):
        filename = None
        fh = None
        fp_ = None
        try:
            # dump bundle to disk
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
            d = fp.read(4096)
            while d:
                fh.write(d)
                d = fp.read(4096)
            fh.close()
            # start http push
            fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
            headers = {r'Content-Type': r'application/mercurial-0.1'}
            return self._callstream(cmd, data=fp_, headers=headers, **args)
        finally:
            if fp_ is not None:
                fp_.close()
            if fh is not None:
                fh.close()
                os.unlink(filename)

    def _callcompressable(self, cmd, **args):
        return self._callstream(cmd, _compressible=True, **args)

    def _abort(self, exception):
        raise exception
530
530
def sendv2request(ui, opener, requestbuilder, apiurl, permission, requests):
    """Issue a batch of version 2 wire protocol commands over HTTP.

    ``requests`` is a list of ``(command, args, future)`` tuples. Returns a
    ``(handler, response)`` pair, where ``handler`` is the client reactor
    handler that will decode frames from ``response``.
    """
    reactor = wireprotoframing.clientreactor(hasmultiplesend=False,
                                             buffersends=True)

    handler = wireprotov2peer.clienthandler(ui, reactor)

    url = '%s/%s' % (apiurl, permission)

    if len(requests) > 1:
        url += '/multirequest'
    else:
        url += '/%s' % requests[0][0]

    for command, args, f in requests:
        # With buffersends=True no frames are emitted at call time;
        # everything comes out of flushcommands() below.
        assert not list(handler.callcommand(command, args, f))

    # TODO stream this.
    body = b''.join(map(bytes, handler.flushcommands()))

    # TODO modify user-agent to reflect v2
    headers = {
        r'Accept': wireprotov2server.FRAMINGTYPE,
        r'Content-Type': wireprotov2server.FRAMINGTYPE,
    }

    req = requestbuilder(pycompat.strurl(url), body, headers)
    req.add_unredirected_header(r'Content-Length', r'%d' % len(body))

    try:
        res = opener.open(req)
    except urlerr.httperror as e:
        if e.code == 401:
            raise error.Abort(_('authorization failed'))

        raise
    except httplib.HTTPException as e:
        ui.traceback()
        raise IOError(None, e)

    return handler, res
571
571
class queuedcommandfuture(pycompat.futures.Future):
    """Wraps result() on command futures to trigger submission on call."""

    def result(self, timeout=None):
        if self.done():
            return pycompat.futures.Future.result(self, timeout)

        # Force buffered commands onto the wire; without this, waiting on
        # a queued future would deadlock.
        self._peerexecutor.sendcommands()

        # sendcommands() will restore the original __class__ and self.result
        # will resolve to Future.result.
        return self.result(timeout)
584
584
@interfaceutil.implementer(repository.ipeercommandexecutor)
class httpv2executor(object):
    """Command executor batching version 2 commands into one HTTP request."""

    def __init__(self, ui, opener, requestbuilder, apiurl, descriptor):
        self._ui = ui
        self._opener = opener
        self._requestbuilder = requestbuilder
        self._apiurl = apiurl
        # API descriptor advertised by the server during handshake;
        # drives command/argument validation in callcommand().
        self._descriptor = descriptor
        self._sent = False
        self._closed = False
        self._neededpermissions = set()
        self._calls = []
        self._futures = weakref.WeakSet()
        self._responseexecutor = None
        self._responsef = None

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'commands are sent')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # The service advertises which commands are available. So if we attempt
        # to call an unknown command or pass an unknown argument, we can screen
        # for this.
        if command not in self._descriptor['commands']:
            raise error.ProgrammingError(
                'wire protocol command %s is not available' % command)

        cmdinfo = self._descriptor['commands'][command]
        unknownargs = set(args.keys()) - set(cmdinfo.get('args', {}))

        if unknownargs:
            raise error.ProgrammingError(
                'wire protocol command %s does not accept argument: %s' % (
                    command, ', '.join(sorted(unknownargs))))

        self._neededpermissions |= set(cmdinfo['permissions'])

        # TODO we /could/ also validate types here, since the API descriptor
        # includes types...

        f = pycompat.futures.Future()

        # Monkeypatch it so result() triggers sendcommands(), otherwise result()
        # could deadlock.
        f.__class__ = queuedcommandfuture
        f._peerexecutor = self

        self._futures.add(f)
        self._calls.append((command, args, f))

        return f

    def sendcommands(self):
        if self._sent:
            return

        if not self._calls:
            return

        self._sent = True

        # Unhack any future types so caller sees a clean type and so we
        # break reference cycle.
        for f in self._futures:
            if isinstance(f, queuedcommandfuture):
                f.__class__ = pycompat.futures.Future
                f._peerexecutor = None

        # Mark the future as running and filter out cancelled futures.
        calls = [(command, args, f)
                 for command, args, f in self._calls
                 if f.set_running_or_notify_cancel()]

        # Clear out references, prevent improper object usage.
        self._calls = None

        if not calls:
            return

        permissions = set(self._neededpermissions)

        # "push" permission implies "pull"; collapse so a mixed batch maps
        # to the single "rw" endpoint.
        if 'push' in permissions and 'pull' in permissions:
            permissions.remove('pull')

        if len(permissions) > 1:
            raise error.RepoError(_('cannot make request requiring multiple '
                                    'permissions: %s') %
                                  _(', ').join(sorted(permissions)))

        permission = {
            'push': 'rw',
            'pull': 'ro',
        }[permissions.pop()]

        handler, resp = sendv2request(
            self._ui, self._opener, self._requestbuilder, self._apiurl,
            permission, calls)

        # TODO we probably want to validate the HTTP code, media type, etc.

        # Read the response on a dedicated thread so futures can resolve
        # while callers continue doing other work.
        self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
        self._responsef = self._responseexecutor.submit(self._handleresponse,
                                                        handler, resp)

    def close(self):
        if self._closed:
            return

        self.sendcommands()

        self._closed = True

        if not self._responsef:
            return

        try:
            self._responsef.result()
        finally:
            self._responseexecutor.shutdown(wait=True)
            self._responsef = None
            self._responseexecutor = None

            # If any of our futures are still in progress, mark them as
            # errored, otherwise a result() could wait indefinitely.
            for f in self._futures:
                if not f.done():
                    f.set_exception(error.ResponseError(
                        _('unfulfilled command response')))

            self._futures = None

    def _handleresponse(self, handler, resp):
        # Called in a thread to read the response.

        while handler.readframe(resp):
            pass
732
732
# TODO implement interface for version 2 peers
@interfaceutil.implementer(repository.ipeerconnection,
                           repository.ipeercapabilities,
                           repository.ipeerrequests)
class httpv2peer(object):
    """Peer speaking version 2 of the HTTP wire protocol."""

    def __init__(self, ui, repourl, apipath, opener, requestbuilder,
                 apidescriptor):
        self.ui = ui

        if repourl.endswith('/'):
            repourl = repourl[:-1]

        self._url = repourl
        self._apipath = apipath
        self._apiurl = '%s/%s' % (repourl, apipath)
        self._opener = opener
        self._requestbuilder = requestbuilder
        self._descriptor = apidescriptor

    # Start of ipeerconnection.

    def url(self):
        return self._url

    def local(self):
        return None

    def peer(self):
        return self

    def canpush(self):
        # TODO change once implemented.
        return False

    def close(self):
        pass

    # End of ipeerconnection.

    # Start of ipeercapabilities.

    def capable(self, name):
        # The capabilities used internally historically map to capabilities
        # advertised from the "capabilities" wire protocol command. However,
        # version 2 of that command works differently.

        # Maps to commands that are available.
        if name in ('branchmap', 'getbundle', 'known', 'lookup', 'pushkey'):
            return True

        # Other concepts.
        if name in ('bundle2',):
            return True

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _('cannot %s; client or remote repository does not support the %r '
              'capability') % (purpose, name))

    # End of ipeercapabilities.

    def _call(self, name, **args):
        with self.commandexecutor() as e:
            return e.callcommand(name, args).result()

    def commandexecutor(self):
        return httpv2executor(self.ui, self._opener, self._requestbuilder,
                              self._apiurl, self._descriptor)
805
806
# Registry of API service names to metadata about peers that handle it.
#
# The following keys are meaningful:
#
# init
#   Callable receiving (ui, repourl, servicepath, opener, requestbuilder,
#   apidescriptor) to create a peer.
#
# priority
#   Integer priority for the service. If we could choose from multiple
#   services, we choose the one with the highest priority.
API_PEERS = {
    wireprototypes.HTTP_WIREPROTO_V2: {
        'init': httpv2peer,
        'priority': 50,
    },
}
823
824
824 def performhandshake(ui, url, opener, requestbuilder):
825 def performhandshake(ui, url, opener, requestbuilder):
825 # The handshake is a request to the capabilities command.
826 # The handshake is a request to the capabilities command.
826
827
827 caps = None
828 caps = None
828 def capable(x):
829 def capable(x):
829 raise error.ProgrammingError('should not be called')
830 raise error.ProgrammingError('should not be called')
830
831
831 args = {}
832 args = {}
832
833
833 # The client advertises support for newer protocols by adding an
834 # The client advertises support for newer protocols by adding an
834 # X-HgUpgrade-* header with a list of supported APIs and an
835 # X-HgUpgrade-* header with a list of supported APIs and an
835 # X-HgProto-* header advertising which serializing formats it supports.
836 # X-HgProto-* header advertising which serializing formats it supports.
836 # We only support the HTTP version 2 transport and CBOR responses for
837 # We only support the HTTP version 2 transport and CBOR responses for
837 # now.
838 # now.
838 advertisev2 = ui.configbool('experimental', 'httppeer.advertise-v2')
839 advertisev2 = ui.configbool('experimental', 'httppeer.advertise-v2')
839
840
840 if advertisev2:
841 if advertisev2:
841 args['headers'] = {
842 args['headers'] = {
842 r'X-HgProto-1': r'cbor',
843 r'X-HgProto-1': r'cbor',
843 }
844 }
844
845
845 args['headers'].update(
846 args['headers'].update(
846 encodevalueinheaders(' '.join(sorted(API_PEERS)),
847 encodevalueinheaders(' '.join(sorted(API_PEERS)),
847 'X-HgUpgrade',
848 'X-HgUpgrade',
848 # We don't know the header limit this early.
849 # We don't know the header limit this early.
849 # So make it small.
850 # So make it small.
850 1024))
851 1024))
851
852
852 req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
853 req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
853 capable, url, 'capabilities',
854 capable, url, 'capabilities',
854 args)
855 args)
855
856
856 resp = sendrequest(ui, opener, req)
857 resp = sendrequest(ui, opener, req)
857
858
858 respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
859 respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
859 compressible=False,
860 compressible=False,
860 allowcbor=advertisev2)
861 allowcbor=advertisev2)
861
862
862 try:
863 try:
863 rawdata = resp.read()
864 rawdata = resp.read()
864 finally:
865 finally:
865 resp.close()
866 resp.close()
866
867
867 if not ct.startswith('application/mercurial-'):
868 if not ct.startswith('application/mercurial-'):
868 raise error.ProgrammingError('unexpected content-type: %s' % ct)
869 raise error.ProgrammingError('unexpected content-type: %s' % ct)
869
870
870 if advertisev2:
871 if advertisev2:
871 if ct == 'application/mercurial-cbor':
872 if ct == 'application/mercurial-cbor':
872 try:
873 try:
873 info = cbor.loads(rawdata)
874 info = cbor.loads(rawdata)
874 except cbor.CBORDecodeError:
875 except cbor.CBORDecodeError:
875 raise error.Abort(_('error decoding CBOR from remote server'),
876 raise error.Abort(_('error decoding CBOR from remote server'),
876 hint=_('try again and consider contacting '
877 hint=_('try again and consider contacting '
877 'the server operator'))
878 'the server operator'))
878
879
879 # We got a legacy response. That's fine.
880 # We got a legacy response. That's fine.
880 elif ct in ('application/mercurial-0.1', 'application/mercurial-0.2'):
881 elif ct in ('application/mercurial-0.1', 'application/mercurial-0.2'):
881 info = {
882 info = {
882 'v1capabilities': set(rawdata.split())
883 'v1capabilities': set(rawdata.split())
883 }
884 }
884
885
885 else:
886 else:
886 raise error.RepoError(
887 raise error.RepoError(
887 _('unexpected response type from server: %s') % ct)
888 _('unexpected response type from server: %s') % ct)
888 else:
889 else:
889 info = {
890 info = {
890 'v1capabilities': set(rawdata.split())
891 'v1capabilities': set(rawdata.split())
891 }
892 }
892
893
893 return respurl, info
894 return respurl, info
894
895
895 def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
896 def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
896 """Construct an appropriate HTTP peer instance.
897 """Construct an appropriate HTTP peer instance.
897
898
898 ``opener`` is an ``url.opener`` that should be used to establish
899 ``opener`` is an ``url.opener`` that should be used to establish
899 connections, perform HTTP requests.
900 connections, perform HTTP requests.
900
901
901 ``requestbuilder`` is the type used for constructing HTTP requests.
902 ``requestbuilder`` is the type used for constructing HTTP requests.
902 It exists as an argument so extensions can override the default.
903 It exists as an argument so extensions can override the default.
903 """
904 """
904 u = util.url(path)
905 u = util.url(path)
905 if u.query or u.fragment:
906 if u.query or u.fragment:
906 raise error.Abort(_('unsupported URL component: "%s"') %
907 raise error.Abort(_('unsupported URL component: "%s"') %
907 (u.query or u.fragment))
908 (u.query or u.fragment))
908
909
909 # urllib cannot handle URLs with embedded user or passwd.
910 # urllib cannot handle URLs with embedded user or passwd.
910 url, authinfo = u.authinfo()
911 url, authinfo = u.authinfo()
911 ui.debug('using %s\n' % url)
912 ui.debug('using %s\n' % url)
912
913
913 opener = opener or urlmod.opener(ui, authinfo)
914 opener = opener or urlmod.opener(ui, authinfo)
914
915
915 respurl, info = performhandshake(ui, url, opener, requestbuilder)
916 respurl, info = performhandshake(ui, url, opener, requestbuilder)
916
917
917 # Given the intersection of APIs that both we and the server support,
918 # Given the intersection of APIs that both we and the server support,
918 # sort by their advertised priority and pick the first one.
919 # sort by their advertised priority and pick the first one.
919 #
920 #
920 # TODO consider making this request-based and interface driven. For
921 # TODO consider making this request-based and interface driven. For
921 # example, the caller could say "I want a peer that does X." It's quite
922 # example, the caller could say "I want a peer that does X." It's quite
922 # possible that not all peers would do that. Since we know the service
923 # possible that not all peers would do that. Since we know the service
923 # capabilities, we could filter out services not meeting the
924 # capabilities, we could filter out services not meeting the
924 # requirements. Possibly by consulting the interfaces defined by the
925 # requirements. Possibly by consulting the interfaces defined by the
925 # peer type.
926 # peer type.
926 apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys())
927 apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys())
927
928
928 preferredchoices = sorted(apipeerchoices,
929 preferredchoices = sorted(apipeerchoices,
929 key=lambda x: API_PEERS[x]['priority'],
930 key=lambda x: API_PEERS[x]['priority'],
930 reverse=True)
931 reverse=True)
931
932
932 for service in preferredchoices:
933 for service in preferredchoices:
933 apipath = '%s/%s' % (info['apibase'].rstrip('/'), service)
934 apipath = '%s/%s' % (info['apibase'].rstrip('/'), service)
934
935
935 return API_PEERS[service]['init'](ui, respurl, apipath, opener,
936 return API_PEERS[service]['init'](ui, respurl, apipath, opener,
936 requestbuilder,
937 requestbuilder,
937 info['apis'][service])
938 info['apis'][service])
938
939
939 # Failed to construct an API peer. Fall back to legacy.
940 # Failed to construct an API peer. Fall back to legacy.
940 return httppeer(ui, path, respurl, opener, requestbuilder,
941 return httppeer(ui, path, respurl, opener, requestbuilder,
941 info['v1capabilities'])
942 info['v1capabilities'])
942
943
943 def instance(ui, path, create, intents=None):
944 def instance(ui, path, create, intents=None):
944 if create:
945 if create:
945 raise error.Abort(_('cannot create new http repository'))
946 raise error.Abort(_('cannot create new http repository'))
946 try:
947 try:
947 if path.startswith('https:') and not urlmod.has_https:
948 if path.startswith('https:') and not urlmod.has_https:
948 raise error.Abort(_('Python support for SSL and HTTPS '
949 raise error.Abort(_('Python support for SSL and HTTPS '
949 'is not installed'))
950 'is not installed'))
950
951
951 inst = makepeer(ui, path)
952 inst = makepeer(ui, path)
952
953
953 return inst
954 return inst
954 except error.RepoError as httpexception:
955 except error.RepoError as httpexception:
955 try:
956 try:
956 r = statichttprepo.instance(ui, "static-" + path, create)
957 r = statichttprepo.instance(ui, "static-" + path, create)
957 ui.note(_('(falling back to static-http)\n'))
958 ui.note(_('(falling back to static-http)\n'))
958 return r
959 return r
959 except error.RepoError:
960 except error.RepoError:
960 raise httpexception # use the original http RepoError instead
961 raise httpexception # use the original http RepoError instead
@@ -1,2380 +1,2378 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 )
23 )
24 from .thirdparty.zope import (
25 interface as zi,
26 )
27 from . import (
24 from . import (
28 bookmarks,
25 bookmarks,
29 branchmap,
26 branchmap,
30 bundle2,
27 bundle2,
31 changegroup,
28 changegroup,
32 changelog,
29 changelog,
33 color,
30 color,
34 context,
31 context,
35 dirstate,
32 dirstate,
36 dirstateguard,
33 dirstateguard,
37 discovery,
34 discovery,
38 encoding,
35 encoding,
39 error,
36 error,
40 exchange,
37 exchange,
41 extensions,
38 extensions,
42 filelog,
39 filelog,
43 hook,
40 hook,
44 lock as lockmod,
41 lock as lockmod,
45 manifest,
42 manifest,
46 match as matchmod,
43 match as matchmod,
47 merge as mergemod,
44 merge as mergemod,
48 mergeutil,
45 mergeutil,
49 namespaces,
46 namespaces,
50 narrowspec,
47 narrowspec,
51 obsolete,
48 obsolete,
52 pathutil,
49 pathutil,
53 phases,
50 phases,
54 pushkey,
51 pushkey,
55 pycompat,
52 pycompat,
56 repository,
53 repository,
57 repoview,
54 repoview,
58 revset,
55 revset,
59 revsetlang,
56 revsetlang,
60 scmutil,
57 scmutil,
61 sparse,
58 sparse,
62 store,
59 store,
63 subrepoutil,
60 subrepoutil,
64 tags as tagsmod,
61 tags as tagsmod,
65 transaction,
62 transaction,
66 txnutil,
63 txnutil,
67 util,
64 util,
68 vfs as vfsmod,
65 vfs as vfsmod,
69 )
66 )
70 from .utils import (
67 from .utils import (
68 interfaceutil,
71 procutil,
69 procutil,
72 stringutil,
70 stringutil,
73 )
71 )
74
72
75 release = lockmod.release
73 release = lockmod.release
76 urlerr = util.urlerr
74 urlerr = util.urlerr
77 urlreq = util.urlreq
75 urlreq = util.urlreq
78
76
79 # set of (path, vfs-location) tuples. vfs-location is:
77 # set of (path, vfs-location) tuples. vfs-location is:
80 # - 'plain for vfs relative paths
78 # - 'plain for vfs relative paths
81 # - '' for svfs relative paths
79 # - '' for svfs relative paths
82 _cachedfiles = set()
80 _cachedfiles = set()
83
81
84 class _basefilecache(scmutil.filecache):
82 class _basefilecache(scmutil.filecache):
85 """All filecache usage on repo are done for logic that should be unfiltered
83 """All filecache usage on repo are done for logic that should be unfiltered
86 """
84 """
87 def __get__(self, repo, type=None):
85 def __get__(self, repo, type=None):
88 if repo is None:
86 if repo is None:
89 return self
87 return self
90 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
88 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
91 def __set__(self, repo, value):
89 def __set__(self, repo, value):
92 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
90 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
93 def __delete__(self, repo):
91 def __delete__(self, repo):
94 return super(_basefilecache, self).__delete__(repo.unfiltered())
92 return super(_basefilecache, self).__delete__(repo.unfiltered())
95
93
96 class repofilecache(_basefilecache):
94 class repofilecache(_basefilecache):
97 """filecache for files in .hg but outside of .hg/store"""
95 """filecache for files in .hg but outside of .hg/store"""
98 def __init__(self, *paths):
96 def __init__(self, *paths):
99 super(repofilecache, self).__init__(*paths)
97 super(repofilecache, self).__init__(*paths)
100 for path in paths:
98 for path in paths:
101 _cachedfiles.add((path, 'plain'))
99 _cachedfiles.add((path, 'plain'))
102
100
103 def join(self, obj, fname):
101 def join(self, obj, fname):
104 return obj.vfs.join(fname)
102 return obj.vfs.join(fname)
105
103
106 class storecache(_basefilecache):
104 class storecache(_basefilecache):
107 """filecache for files in the store"""
105 """filecache for files in the store"""
108 def __init__(self, *paths):
106 def __init__(self, *paths):
109 super(storecache, self).__init__(*paths)
107 super(storecache, self).__init__(*paths)
110 for path in paths:
108 for path in paths:
111 _cachedfiles.add((path, ''))
109 _cachedfiles.add((path, ''))
112
110
113 def join(self, obj, fname):
111 def join(self, obj, fname):
114 return obj.sjoin(fname)
112 return obj.sjoin(fname)
115
113
116 def isfilecached(repo, name):
114 def isfilecached(repo, name):
117 """check if a repo has already cached "name" filecache-ed property
115 """check if a repo has already cached "name" filecache-ed property
118
116
119 This returns (cachedobj-or-None, iscached) tuple.
117 This returns (cachedobj-or-None, iscached) tuple.
120 """
118 """
121 cacheentry = repo.unfiltered()._filecache.get(name, None)
119 cacheentry = repo.unfiltered()._filecache.get(name, None)
122 if not cacheentry:
120 if not cacheentry:
123 return None, False
121 return None, False
124 return cacheentry.obj, True
122 return cacheentry.obj, True
125
123
126 class unfilteredpropertycache(util.propertycache):
124 class unfilteredpropertycache(util.propertycache):
127 """propertycache that apply to unfiltered repo only"""
125 """propertycache that apply to unfiltered repo only"""
128
126
129 def __get__(self, repo, type=None):
127 def __get__(self, repo, type=None):
130 unfi = repo.unfiltered()
128 unfi = repo.unfiltered()
131 if unfi is repo:
129 if unfi is repo:
132 return super(unfilteredpropertycache, self).__get__(unfi)
130 return super(unfilteredpropertycache, self).__get__(unfi)
133 return getattr(unfi, self.name)
131 return getattr(unfi, self.name)
134
132
135 class filteredpropertycache(util.propertycache):
133 class filteredpropertycache(util.propertycache):
136 """propertycache that must take filtering in account"""
134 """propertycache that must take filtering in account"""
137
135
138 def cachevalue(self, obj, value):
136 def cachevalue(self, obj, value):
139 object.__setattr__(obj, self.name, value)
137 object.__setattr__(obj, self.name, value)
140
138
141
139
142 def hasunfilteredcache(repo, name):
140 def hasunfilteredcache(repo, name):
143 """check if a repo has an unfilteredpropertycache value for <name>"""
141 """check if a repo has an unfilteredpropertycache value for <name>"""
144 return name in vars(repo.unfiltered())
142 return name in vars(repo.unfiltered())
145
143
146 def unfilteredmethod(orig):
144 def unfilteredmethod(orig):
147 """decorate method that always need to be run on unfiltered version"""
145 """decorate method that always need to be run on unfiltered version"""
148 def wrapper(repo, *args, **kwargs):
146 def wrapper(repo, *args, **kwargs):
149 return orig(repo.unfiltered(), *args, **kwargs)
147 return orig(repo.unfiltered(), *args, **kwargs)
150 return wrapper
148 return wrapper
151
149
152 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
150 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
153 'unbundle'}
151 'unbundle'}
154 legacycaps = moderncaps.union({'changegroupsubset'})
152 legacycaps = moderncaps.union({'changegroupsubset'})
155
153
156 @zi.implementer(repository.ipeercommandexecutor)
154 @interfaceutil.implementer(repository.ipeercommandexecutor)
157 class localcommandexecutor(object):
155 class localcommandexecutor(object):
158 def __init__(self, peer):
156 def __init__(self, peer):
159 self._peer = peer
157 self._peer = peer
160 self._sent = False
158 self._sent = False
161 self._closed = False
159 self._closed = False
162
160
163 def __enter__(self):
161 def __enter__(self):
164 return self
162 return self
165
163
166 def __exit__(self, exctype, excvalue, exctb):
164 def __exit__(self, exctype, excvalue, exctb):
167 self.close()
165 self.close()
168
166
169 def callcommand(self, command, args):
167 def callcommand(self, command, args):
170 if self._sent:
168 if self._sent:
171 raise error.ProgrammingError('callcommand() cannot be used after '
169 raise error.ProgrammingError('callcommand() cannot be used after '
172 'sendcommands()')
170 'sendcommands()')
173
171
174 if self._closed:
172 if self._closed:
175 raise error.ProgrammingError('callcommand() cannot be used after '
173 raise error.ProgrammingError('callcommand() cannot be used after '
176 'close()')
174 'close()')
177
175
178 # We don't need to support anything fancy. Just call the named
176 # We don't need to support anything fancy. Just call the named
179 # method on the peer and return a resolved future.
177 # method on the peer and return a resolved future.
180 fn = getattr(self._peer, pycompat.sysstr(command))
178 fn = getattr(self._peer, pycompat.sysstr(command))
181
179
182 f = pycompat.futures.Future()
180 f = pycompat.futures.Future()
183
181
184 try:
182 try:
185 result = fn(**pycompat.strkwargs(args))
183 result = fn(**pycompat.strkwargs(args))
186 except Exception:
184 except Exception:
187 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
185 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
188 else:
186 else:
189 f.set_result(result)
187 f.set_result(result)
190
188
191 return f
189 return f
192
190
193 def sendcommands(self):
191 def sendcommands(self):
194 self._sent = True
192 self._sent = True
195
193
196 def close(self):
194 def close(self):
197 self._closed = True
195 self._closed = True
198
196
199 @zi.implementer(repository.ipeercommands)
197 @interfaceutil.implementer(repository.ipeercommands)
200 class localpeer(repository.peer):
198 class localpeer(repository.peer):
201 '''peer for a local repo; reflects only the most recent API'''
199 '''peer for a local repo; reflects only the most recent API'''
202
200
203 def __init__(self, repo, caps=None):
201 def __init__(self, repo, caps=None):
204 super(localpeer, self).__init__()
202 super(localpeer, self).__init__()
205
203
206 if caps is None:
204 if caps is None:
207 caps = moderncaps.copy()
205 caps = moderncaps.copy()
208 self._repo = repo.filtered('served')
206 self._repo = repo.filtered('served')
209 self.ui = repo.ui
207 self.ui = repo.ui
210 self._caps = repo._restrictcapabilities(caps)
208 self._caps = repo._restrictcapabilities(caps)
211
209
212 # Begin of _basepeer interface.
210 # Begin of _basepeer interface.
213
211
214 def url(self):
212 def url(self):
215 return self._repo.url()
213 return self._repo.url()
216
214
217 def local(self):
215 def local(self):
218 return self._repo
216 return self._repo
219
217
220 def peer(self):
218 def peer(self):
221 return self
219 return self
222
220
223 def canpush(self):
221 def canpush(self):
224 return True
222 return True
225
223
226 def close(self):
224 def close(self):
227 self._repo.close()
225 self._repo.close()
228
226
229 # End of _basepeer interface.
227 # End of _basepeer interface.
230
228
231 # Begin of _basewirecommands interface.
229 # Begin of _basewirecommands interface.
232
230
233 def branchmap(self):
231 def branchmap(self):
234 return self._repo.branchmap()
232 return self._repo.branchmap()
235
233
236 def capabilities(self):
234 def capabilities(self):
237 return self._caps
235 return self._caps
238
236
239 def clonebundles(self):
237 def clonebundles(self):
240 return self._repo.tryread('clonebundles.manifest')
238 return self._repo.tryread('clonebundles.manifest')
241
239
242 def debugwireargs(self, one, two, three=None, four=None, five=None):
240 def debugwireargs(self, one, two, three=None, four=None, five=None):
243 """Used to test argument passing over the wire"""
241 """Used to test argument passing over the wire"""
244 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
242 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
245 pycompat.bytestr(four),
243 pycompat.bytestr(four),
246 pycompat.bytestr(five))
244 pycompat.bytestr(five))
247
245
248 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
246 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
249 **kwargs):
247 **kwargs):
250 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
248 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
251 common=common, bundlecaps=bundlecaps,
249 common=common, bundlecaps=bundlecaps,
252 **kwargs)[1]
250 **kwargs)[1]
253 cb = util.chunkbuffer(chunks)
251 cb = util.chunkbuffer(chunks)
254
252
255 if exchange.bundle2requested(bundlecaps):
253 if exchange.bundle2requested(bundlecaps):
256 # When requesting a bundle2, getbundle returns a stream to make the
254 # When requesting a bundle2, getbundle returns a stream to make the
257 # wire level function happier. We need to build a proper object
255 # wire level function happier. We need to build a proper object
258 # from it in local peer.
256 # from it in local peer.
259 return bundle2.getunbundler(self.ui, cb)
257 return bundle2.getunbundler(self.ui, cb)
260 else:
258 else:
261 return changegroup.getunbundler('01', cb, None)
259 return changegroup.getunbundler('01', cb, None)
262
260
263 def heads(self):
261 def heads(self):
264 return self._repo.heads()
262 return self._repo.heads()
265
263
266 def known(self, nodes):
264 def known(self, nodes):
267 return self._repo.known(nodes)
265 return self._repo.known(nodes)
268
266
269 def listkeys(self, namespace):
267 def listkeys(self, namespace):
270 return self._repo.listkeys(namespace)
268 return self._repo.listkeys(namespace)
271
269
272 def lookup(self, key):
270 def lookup(self, key):
273 return self._repo.lookup(key)
271 return self._repo.lookup(key)
274
272
275 def pushkey(self, namespace, key, old, new):
273 def pushkey(self, namespace, key, old, new):
276 return self._repo.pushkey(namespace, key, old, new)
274 return self._repo.pushkey(namespace, key, old, new)
277
275
278 def stream_out(self):
276 def stream_out(self):
279 raise error.Abort(_('cannot perform stream clone against local '
277 raise error.Abort(_('cannot perform stream clone against local '
280 'peer'))
278 'peer'))
281
279
282 def unbundle(self, bundle, heads, url):
280 def unbundle(self, bundle, heads, url):
283 """apply a bundle on a repo
281 """apply a bundle on a repo
284
282
285 This function handles the repo locking itself."""
283 This function handles the repo locking itself."""
286 try:
284 try:
287 try:
285 try:
288 bundle = exchange.readbundle(self.ui, bundle, None)
286 bundle = exchange.readbundle(self.ui, bundle, None)
289 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
287 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
290 if util.safehasattr(ret, 'getchunks'):
288 if util.safehasattr(ret, 'getchunks'):
291 # This is a bundle20 object, turn it into an unbundler.
289 # This is a bundle20 object, turn it into an unbundler.
292 # This little dance should be dropped eventually when the
290 # This little dance should be dropped eventually when the
293 # API is finally improved.
291 # API is finally improved.
294 stream = util.chunkbuffer(ret.getchunks())
292 stream = util.chunkbuffer(ret.getchunks())
295 ret = bundle2.getunbundler(self.ui, stream)
293 ret = bundle2.getunbundler(self.ui, stream)
296 return ret
294 return ret
297 except Exception as exc:
295 except Exception as exc:
298 # If the exception contains output salvaged from a bundle2
296 # If the exception contains output salvaged from a bundle2
299 # reply, we need to make sure it is printed before continuing
297 # reply, we need to make sure it is printed before continuing
300 # to fail. So we build a bundle2 with such output and consume
298 # to fail. So we build a bundle2 with such output and consume
301 # it directly.
299 # it directly.
302 #
300 #
303 # This is not very elegant but allows a "simple" solution for
301 # This is not very elegant but allows a "simple" solution for
304 # issue4594
302 # issue4594
305 output = getattr(exc, '_bundle2salvagedoutput', ())
303 output = getattr(exc, '_bundle2salvagedoutput', ())
306 if output:
304 if output:
307 bundler = bundle2.bundle20(self._repo.ui)
305 bundler = bundle2.bundle20(self._repo.ui)
308 for out in output:
306 for out in output:
309 bundler.addpart(out)
307 bundler.addpart(out)
310 stream = util.chunkbuffer(bundler.getchunks())
308 stream = util.chunkbuffer(bundler.getchunks())
311 b = bundle2.getunbundler(self.ui, stream)
309 b = bundle2.getunbundler(self.ui, stream)
312 bundle2.processbundle(self._repo, b)
310 bundle2.processbundle(self._repo, b)
313 raise
311 raise
314 except error.PushRaced as exc:
312 except error.PushRaced as exc:
315 raise error.ResponseError(_('push failed:'),
313 raise error.ResponseError(_('push failed:'),
316 stringutil.forcebytestr(exc))
314 stringutil.forcebytestr(exc))
317
315
318 # End of _basewirecommands interface.
316 # End of _basewirecommands interface.
319
317
320 # Begin of peer interface.
318 # Begin of peer interface.
321
319
322 def commandexecutor(self):
320 def commandexecutor(self):
323 return localcommandexecutor(self)
321 return localcommandexecutor(self)
324
322
325 # End of peer interface.
323 # End of peer interface.
326
324
327 @zi.implementer(repository.ipeerlegacycommands)
325 @interfaceutil.implementer(repository.ipeerlegacycommands)
328 class locallegacypeer(localpeer):
326 class locallegacypeer(localpeer):
329 '''peer extension which implements legacy methods too; used for tests with
327 '''peer extension which implements legacy methods too; used for tests with
330 restricted capabilities'''
328 restricted capabilities'''
331
329
332 def __init__(self, repo):
330 def __init__(self, repo):
333 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
331 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
334
332
335 # Begin of baselegacywirecommands interface.
333 # Begin of baselegacywirecommands interface.
336
334
337 def between(self, pairs):
335 def between(self, pairs):
338 return self._repo.between(pairs)
336 return self._repo.between(pairs)
339
337
340 def branches(self, nodes):
338 def branches(self, nodes):
341 return self._repo.branches(nodes)
339 return self._repo.branches(nodes)
342
340
343 def changegroup(self, nodes, source):
341 def changegroup(self, nodes, source):
344 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
342 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
345 missingheads=self._repo.heads())
343 missingheads=self._repo.heads())
346 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
344 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
347
345
348 def changegroupsubset(self, bases, heads, source):
346 def changegroupsubset(self, bases, heads, source):
349 outgoing = discovery.outgoing(self._repo, missingroots=bases,
347 outgoing = discovery.outgoing(self._repo, missingroots=bases,
350 missingheads=heads)
348 missingheads=heads)
351 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
349 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
352
350
353 # End of baselegacywirecommands interface.
351 # End of baselegacywirecommands interface.
354
352
355 # Increment the sub-version when the revlog v2 format changes to lock out old
353 # Increment the sub-version when the revlog v2 format changes to lock out old
356 # clients.
354 # clients.
357 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
355 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
358
356
359 # Functions receiving (ui, features) that extensions can register to impact
357 # Functions receiving (ui, features) that extensions can register to impact
360 # the ability to load repositories with custom requirements. Only
358 # the ability to load repositories with custom requirements. Only
361 # functions defined in loaded extensions are called.
359 # functions defined in loaded extensions are called.
362 #
360 #
363 # The function receives a set of requirement strings that the repository
361 # The function receives a set of requirement strings that the repository
364 # is capable of opening. Functions will typically add elements to the
362 # is capable of opening. Functions will typically add elements to the
365 # set to reflect that the extension knows how to handle that requirements.
363 # set to reflect that the extension knows how to handle that requirements.
366 featuresetupfuncs = set()
364 featuresetupfuncs = set()
367
365
368 @zi.implementer(repository.completelocalrepository)
366 @interfaceutil.implementer(repository.completelocalrepository)
369 class localrepository(object):
367 class localrepository(object):
370
368
371 # obsolete experimental requirements:
369 # obsolete experimental requirements:
372 # - manifestv2: An experimental new manifest format that allowed
370 # - manifestv2: An experimental new manifest format that allowed
373 # for stem compression of long paths. Experiment ended up not
371 # for stem compression of long paths. Experiment ended up not
374 # being successful (repository sizes went up due to worse delta
372 # being successful (repository sizes went up due to worse delta
375 # chains), and the code was deleted in 4.6.
373 # chains), and the code was deleted in 4.6.
376 supportedformats = {
374 supportedformats = {
377 'revlogv1',
375 'revlogv1',
378 'generaldelta',
376 'generaldelta',
379 'treemanifest',
377 'treemanifest',
380 REVLOGV2_REQUIREMENT,
378 REVLOGV2_REQUIREMENT,
381 }
379 }
382 _basesupported = supportedformats | {
380 _basesupported = supportedformats | {
383 'store',
381 'store',
384 'fncache',
382 'fncache',
385 'shared',
383 'shared',
386 'relshared',
384 'relshared',
387 'dotencode',
385 'dotencode',
388 'exp-sparse',
386 'exp-sparse',
389 }
387 }
390 openerreqs = {
388 openerreqs = {
391 'revlogv1',
389 'revlogv1',
392 'generaldelta',
390 'generaldelta',
393 'treemanifest',
391 'treemanifest',
394 }
392 }
395
393
396 # list of prefix for file which can be written without 'wlock'
394 # list of prefix for file which can be written without 'wlock'
397 # Extensions should extend this list when needed
395 # Extensions should extend this list when needed
398 _wlockfreeprefix = {
396 _wlockfreeprefix = {
399 # We migh consider requiring 'wlock' for the next
397 # We migh consider requiring 'wlock' for the next
400 # two, but pretty much all the existing code assume
398 # two, but pretty much all the existing code assume
401 # wlock is not needed so we keep them excluded for
399 # wlock is not needed so we keep them excluded for
402 # now.
400 # now.
403 'hgrc',
401 'hgrc',
404 'requires',
402 'requires',
405 # XXX cache is a complicatged business someone
403 # XXX cache is a complicatged business someone
406 # should investigate this in depth at some point
404 # should investigate this in depth at some point
407 'cache/',
405 'cache/',
408 # XXX shouldn't be dirstate covered by the wlock?
406 # XXX shouldn't be dirstate covered by the wlock?
409 'dirstate',
407 'dirstate',
410 # XXX bisect was still a bit too messy at the time
408 # XXX bisect was still a bit too messy at the time
411 # this changeset was introduced. Someone should fix
409 # this changeset was introduced. Someone should fix
412 # the remainig bit and drop this line
410 # the remainig bit and drop this line
413 'bisect.state',
411 'bisect.state',
414 }
412 }
415
413
416 def __init__(self, baseui, path, create=False, intents=None):
414 def __init__(self, baseui, path, create=False, intents=None):
417 self.requirements = set()
415 self.requirements = set()
418 self.filtername = None
416 self.filtername = None
419 # wvfs: rooted at the repository root, used to access the working copy
417 # wvfs: rooted at the repository root, used to access the working copy
420 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
418 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
421 # vfs: rooted at .hg, used to access repo files outside of .hg/store
419 # vfs: rooted at .hg, used to access repo files outside of .hg/store
422 self.vfs = None
420 self.vfs = None
423 # svfs: usually rooted at .hg/store, used to access repository history
421 # svfs: usually rooted at .hg/store, used to access repository history
424 # If this is a shared repository, this vfs may point to another
422 # If this is a shared repository, this vfs may point to another
425 # repository's .hg/store directory.
423 # repository's .hg/store directory.
426 self.svfs = None
424 self.svfs = None
427 self.root = self.wvfs.base
425 self.root = self.wvfs.base
428 self.path = self.wvfs.join(".hg")
426 self.path = self.wvfs.join(".hg")
429 self.origroot = path
427 self.origroot = path
430 # This is only used by context.workingctx.match in order to
428 # This is only used by context.workingctx.match in order to
431 # detect files in subrepos.
429 # detect files in subrepos.
432 self.auditor = pathutil.pathauditor(
430 self.auditor = pathutil.pathauditor(
433 self.root, callback=self._checknested)
431 self.root, callback=self._checknested)
434 # This is only used by context.basectx.match in order to detect
432 # This is only used by context.basectx.match in order to detect
435 # files in subrepos.
433 # files in subrepos.
436 self.nofsauditor = pathutil.pathauditor(
434 self.nofsauditor = pathutil.pathauditor(
437 self.root, callback=self._checknested, realfs=False, cached=True)
435 self.root, callback=self._checknested, realfs=False, cached=True)
438 self.baseui = baseui
436 self.baseui = baseui
439 self.ui = baseui.copy()
437 self.ui = baseui.copy()
440 self.ui.copy = baseui.copy # prevent copying repo configuration
438 self.ui.copy = baseui.copy # prevent copying repo configuration
441 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
439 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
442 if (self.ui.configbool('devel', 'all-warnings') or
440 if (self.ui.configbool('devel', 'all-warnings') or
443 self.ui.configbool('devel', 'check-locks')):
441 self.ui.configbool('devel', 'check-locks')):
444 self.vfs.audit = self._getvfsward(self.vfs.audit)
442 self.vfs.audit = self._getvfsward(self.vfs.audit)
445 # A list of callback to shape the phase if no data were found.
443 # A list of callback to shape the phase if no data were found.
446 # Callback are in the form: func(repo, roots) --> processed root.
444 # Callback are in the form: func(repo, roots) --> processed root.
447 # This list it to be filled by extension during repo setup
445 # This list it to be filled by extension during repo setup
448 self._phasedefaults = []
446 self._phasedefaults = []
449 try:
447 try:
450 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
448 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
451 self._loadextensions()
449 self._loadextensions()
452 except IOError:
450 except IOError:
453 pass
451 pass
454
452
455 if featuresetupfuncs:
453 if featuresetupfuncs:
456 self.supported = set(self._basesupported) # use private copy
454 self.supported = set(self._basesupported) # use private copy
457 extmods = set(m.__name__ for n, m
455 extmods = set(m.__name__ for n, m
458 in extensions.extensions(self.ui))
456 in extensions.extensions(self.ui))
459 for setupfunc in featuresetupfuncs:
457 for setupfunc in featuresetupfuncs:
460 if setupfunc.__module__ in extmods:
458 if setupfunc.__module__ in extmods:
461 setupfunc(self.ui, self.supported)
459 setupfunc(self.ui, self.supported)
462 else:
460 else:
463 self.supported = self._basesupported
461 self.supported = self._basesupported
464 color.setup(self.ui)
462 color.setup(self.ui)
465
463
466 # Add compression engines.
464 # Add compression engines.
467 for name in util.compengines:
465 for name in util.compengines:
468 engine = util.compengines[name]
466 engine = util.compengines[name]
469 if engine.revlogheader():
467 if engine.revlogheader():
470 self.supported.add('exp-compression-%s' % name)
468 self.supported.add('exp-compression-%s' % name)
471
469
472 if not self.vfs.isdir():
470 if not self.vfs.isdir():
473 if create:
471 if create:
474 self.requirements = newreporequirements(self)
472 self.requirements = newreporequirements(self)
475
473
476 if not self.wvfs.exists():
474 if not self.wvfs.exists():
477 self.wvfs.makedirs()
475 self.wvfs.makedirs()
478 self.vfs.makedir(notindexed=True)
476 self.vfs.makedir(notindexed=True)
479
477
480 if 'store' in self.requirements:
478 if 'store' in self.requirements:
481 self.vfs.mkdir("store")
479 self.vfs.mkdir("store")
482
480
483 # create an invalid changelog
481 # create an invalid changelog
484 self.vfs.append(
482 self.vfs.append(
485 "00changelog.i",
483 "00changelog.i",
486 '\0\0\0\2' # represents revlogv2
484 '\0\0\0\2' # represents revlogv2
487 ' dummy changelog to prevent using the old repo layout'
485 ' dummy changelog to prevent using the old repo layout'
488 )
486 )
489 else:
487 else:
490 raise error.RepoError(_("repository %s not found") % path)
488 raise error.RepoError(_("repository %s not found") % path)
491 elif create:
489 elif create:
492 raise error.RepoError(_("repository %s already exists") % path)
490 raise error.RepoError(_("repository %s already exists") % path)
493 else:
491 else:
494 try:
492 try:
495 self.requirements = scmutil.readrequires(
493 self.requirements = scmutil.readrequires(
496 self.vfs, self.supported)
494 self.vfs, self.supported)
497 except IOError as inst:
495 except IOError as inst:
498 if inst.errno != errno.ENOENT:
496 if inst.errno != errno.ENOENT:
499 raise
497 raise
500
498
501 cachepath = self.vfs.join('cache')
499 cachepath = self.vfs.join('cache')
502 self.sharedpath = self.path
500 self.sharedpath = self.path
503 try:
501 try:
504 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
502 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
505 if 'relshared' in self.requirements:
503 if 'relshared' in self.requirements:
506 sharedpath = self.vfs.join(sharedpath)
504 sharedpath = self.vfs.join(sharedpath)
507 vfs = vfsmod.vfs(sharedpath, realpath=True)
505 vfs = vfsmod.vfs(sharedpath, realpath=True)
508 cachepath = vfs.join('cache')
506 cachepath = vfs.join('cache')
509 s = vfs.base
507 s = vfs.base
510 if not vfs.exists():
508 if not vfs.exists():
511 raise error.RepoError(
509 raise error.RepoError(
512 _('.hg/sharedpath points to nonexistent directory %s') % s)
510 _('.hg/sharedpath points to nonexistent directory %s') % s)
513 self.sharedpath = s
511 self.sharedpath = s
514 except IOError as inst:
512 except IOError as inst:
515 if inst.errno != errno.ENOENT:
513 if inst.errno != errno.ENOENT:
516 raise
514 raise
517
515
518 if 'exp-sparse' in self.requirements and not sparse.enabled:
516 if 'exp-sparse' in self.requirements and not sparse.enabled:
519 raise error.RepoError(_('repository is using sparse feature but '
517 raise error.RepoError(_('repository is using sparse feature but '
520 'sparse is not enabled; enable the '
518 'sparse is not enabled; enable the '
521 '"sparse" extensions to access'))
519 '"sparse" extensions to access'))
522
520
523 self.store = store.store(
521 self.store = store.store(
524 self.requirements, self.sharedpath,
522 self.requirements, self.sharedpath,
525 lambda base: vfsmod.vfs(base, cacheaudited=True))
523 lambda base: vfsmod.vfs(base, cacheaudited=True))
526 self.spath = self.store.path
524 self.spath = self.store.path
527 self.svfs = self.store.vfs
525 self.svfs = self.store.vfs
528 self.sjoin = self.store.join
526 self.sjoin = self.store.join
529 self.vfs.createmode = self.store.createmode
527 self.vfs.createmode = self.store.createmode
530 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
528 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
531 self.cachevfs.createmode = self.store.createmode
529 self.cachevfs.createmode = self.store.createmode
532 if (self.ui.configbool('devel', 'all-warnings') or
530 if (self.ui.configbool('devel', 'all-warnings') or
533 self.ui.configbool('devel', 'check-locks')):
531 self.ui.configbool('devel', 'check-locks')):
534 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
532 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
535 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
533 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
536 else: # standard vfs
534 else: # standard vfs
537 self.svfs.audit = self._getsvfsward(self.svfs.audit)
535 self.svfs.audit = self._getsvfsward(self.svfs.audit)
538 self._applyopenerreqs()
536 self._applyopenerreqs()
539 if create:
537 if create:
540 self._writerequirements()
538 self._writerequirements()
541
539
542 self._dirstatevalidatewarned = False
540 self._dirstatevalidatewarned = False
543
541
544 self._branchcaches = {}
542 self._branchcaches = {}
545 self._revbranchcache = None
543 self._revbranchcache = None
546 self._filterpats = {}
544 self._filterpats = {}
547 self._datafilters = {}
545 self._datafilters = {}
548 self._transref = self._lockref = self._wlockref = None
546 self._transref = self._lockref = self._wlockref = None
549
547
550 # A cache for various files under .hg/ that tracks file changes,
548 # A cache for various files under .hg/ that tracks file changes,
551 # (used by the filecache decorator)
549 # (used by the filecache decorator)
552 #
550 #
553 # Maps a property name to its util.filecacheentry
551 # Maps a property name to its util.filecacheentry
554 self._filecache = {}
552 self._filecache = {}
555
553
556 # hold sets of revision to be filtered
554 # hold sets of revision to be filtered
557 # should be cleared when something might have changed the filter value:
555 # should be cleared when something might have changed the filter value:
558 # - new changesets,
556 # - new changesets,
559 # - phase change,
557 # - phase change,
560 # - new obsolescence marker,
558 # - new obsolescence marker,
561 # - working directory parent change,
559 # - working directory parent change,
562 # - bookmark changes
560 # - bookmark changes
563 self.filteredrevcache = {}
561 self.filteredrevcache = {}
564
562
565 # post-dirstate-status hooks
563 # post-dirstate-status hooks
566 self._postdsstatus = []
564 self._postdsstatus = []
567
565
568 # generic mapping between names and nodes
566 # generic mapping between names and nodes
569 self.names = namespaces.namespaces()
567 self.names = namespaces.namespaces()
570
568
571 # Key to signature value.
569 # Key to signature value.
572 self._sparsesignaturecache = {}
570 self._sparsesignaturecache = {}
573 # Signature to cached matcher instance.
571 # Signature to cached matcher instance.
574 self._sparsematchercache = {}
572 self._sparsematchercache = {}
575
573
576 def _getvfsward(self, origfunc):
574 def _getvfsward(self, origfunc):
577 """build a ward for self.vfs"""
575 """build a ward for self.vfs"""
578 rref = weakref.ref(self)
576 rref = weakref.ref(self)
579 def checkvfs(path, mode=None):
577 def checkvfs(path, mode=None):
580 ret = origfunc(path, mode=mode)
578 ret = origfunc(path, mode=mode)
581 repo = rref()
579 repo = rref()
582 if (repo is None
580 if (repo is None
583 or not util.safehasattr(repo, '_wlockref')
581 or not util.safehasattr(repo, '_wlockref')
584 or not util.safehasattr(repo, '_lockref')):
582 or not util.safehasattr(repo, '_lockref')):
585 return
583 return
586 if mode in (None, 'r', 'rb'):
584 if mode in (None, 'r', 'rb'):
587 return
585 return
588 if path.startswith(repo.path):
586 if path.startswith(repo.path):
589 # truncate name relative to the repository (.hg)
587 # truncate name relative to the repository (.hg)
590 path = path[len(repo.path) + 1:]
588 path = path[len(repo.path) + 1:]
591 if path.startswith('cache/'):
589 if path.startswith('cache/'):
592 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
590 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
593 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
591 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
594 if path.startswith('journal.'):
592 if path.startswith('journal.'):
595 # journal is covered by 'lock'
593 # journal is covered by 'lock'
596 if repo._currentlock(repo._lockref) is None:
594 if repo._currentlock(repo._lockref) is None:
597 repo.ui.develwarn('write with no lock: "%s"' % path,
595 repo.ui.develwarn('write with no lock: "%s"' % path,
598 stacklevel=2, config='check-locks')
596 stacklevel=2, config='check-locks')
599 elif repo._currentlock(repo._wlockref) is None:
597 elif repo._currentlock(repo._wlockref) is None:
600 # rest of vfs files are covered by 'wlock'
598 # rest of vfs files are covered by 'wlock'
601 #
599 #
602 # exclude special files
600 # exclude special files
603 for prefix in self._wlockfreeprefix:
601 for prefix in self._wlockfreeprefix:
604 if path.startswith(prefix):
602 if path.startswith(prefix):
605 return
603 return
606 repo.ui.develwarn('write with no wlock: "%s"' % path,
604 repo.ui.develwarn('write with no wlock: "%s"' % path,
607 stacklevel=2, config='check-locks')
605 stacklevel=2, config='check-locks')
608 return ret
606 return ret
609 return checkvfs
607 return checkvfs
610
608
611 def _getsvfsward(self, origfunc):
609 def _getsvfsward(self, origfunc):
612 """build a ward for self.svfs"""
610 """build a ward for self.svfs"""
613 rref = weakref.ref(self)
611 rref = weakref.ref(self)
614 def checksvfs(path, mode=None):
612 def checksvfs(path, mode=None):
615 ret = origfunc(path, mode=mode)
613 ret = origfunc(path, mode=mode)
616 repo = rref()
614 repo = rref()
617 if repo is None or not util.safehasattr(repo, '_lockref'):
615 if repo is None or not util.safehasattr(repo, '_lockref'):
618 return
616 return
619 if mode in (None, 'r', 'rb'):
617 if mode in (None, 'r', 'rb'):
620 return
618 return
621 if path.startswith(repo.sharedpath):
619 if path.startswith(repo.sharedpath):
622 # truncate name relative to the repository (.hg)
620 # truncate name relative to the repository (.hg)
623 path = path[len(repo.sharedpath) + 1:]
621 path = path[len(repo.sharedpath) + 1:]
624 if repo._currentlock(repo._lockref) is None:
622 if repo._currentlock(repo._lockref) is None:
625 repo.ui.develwarn('write with no lock: "%s"' % path,
623 repo.ui.develwarn('write with no lock: "%s"' % path,
626 stacklevel=3)
624 stacklevel=3)
627 return ret
625 return ret
628 return checksvfs
626 return checksvfs
629
627
630 def close(self):
628 def close(self):
631 self._writecaches()
629 self._writecaches()
632
630
633 def _loadextensions(self):
631 def _loadextensions(self):
634 extensions.loadall(self.ui)
632 extensions.loadall(self.ui)
635
633
636 def _writecaches(self):
634 def _writecaches(self):
637 if self._revbranchcache:
635 if self._revbranchcache:
638 self._revbranchcache.write()
636 self._revbranchcache.write()
639
637
640 def _restrictcapabilities(self, caps):
638 def _restrictcapabilities(self, caps):
641 if self.ui.configbool('experimental', 'bundle2-advertise'):
639 if self.ui.configbool('experimental', 'bundle2-advertise'):
642 caps = set(caps)
640 caps = set(caps)
643 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
641 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
644 role='client'))
642 role='client'))
645 caps.add('bundle2=' + urlreq.quote(capsblob))
643 caps.add('bundle2=' + urlreq.quote(capsblob))
646 return caps
644 return caps
647
645
648 def _applyopenerreqs(self):
646 def _applyopenerreqs(self):
649 self.svfs.options = dict((r, 1) for r in self.requirements
647 self.svfs.options = dict((r, 1) for r in self.requirements
650 if r in self.openerreqs)
648 if r in self.openerreqs)
651 # experimental config: format.chunkcachesize
649 # experimental config: format.chunkcachesize
652 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
650 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
653 if chunkcachesize is not None:
651 if chunkcachesize is not None:
654 self.svfs.options['chunkcachesize'] = chunkcachesize
652 self.svfs.options['chunkcachesize'] = chunkcachesize
655 # experimental config: format.maxchainlen
653 # experimental config: format.maxchainlen
656 maxchainlen = self.ui.configint('format', 'maxchainlen')
654 maxchainlen = self.ui.configint('format', 'maxchainlen')
657 if maxchainlen is not None:
655 if maxchainlen is not None:
658 self.svfs.options['maxchainlen'] = maxchainlen
656 self.svfs.options['maxchainlen'] = maxchainlen
659 # experimental config: format.manifestcachesize
657 # experimental config: format.manifestcachesize
660 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
658 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
661 if manifestcachesize is not None:
659 if manifestcachesize is not None:
662 self.svfs.options['manifestcachesize'] = manifestcachesize
660 self.svfs.options['manifestcachesize'] = manifestcachesize
663 # experimental config: format.aggressivemergedeltas
661 # experimental config: format.aggressivemergedeltas
664 aggressivemergedeltas = self.ui.configbool('format',
662 aggressivemergedeltas = self.ui.configbool('format',
665 'aggressivemergedeltas')
663 'aggressivemergedeltas')
666 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
664 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
667 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
665 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
668 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
666 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
669 if 0 <= chainspan:
667 if 0 <= chainspan:
670 self.svfs.options['maxdeltachainspan'] = chainspan
668 self.svfs.options['maxdeltachainspan'] = chainspan
671 mmapindexthreshold = self.ui.configbytes('experimental',
669 mmapindexthreshold = self.ui.configbytes('experimental',
672 'mmapindexthreshold')
670 'mmapindexthreshold')
673 if mmapindexthreshold is not None:
671 if mmapindexthreshold is not None:
674 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
672 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
675 withsparseread = self.ui.configbool('experimental', 'sparse-read')
673 withsparseread = self.ui.configbool('experimental', 'sparse-read')
676 srdensitythres = float(self.ui.config('experimental',
674 srdensitythres = float(self.ui.config('experimental',
677 'sparse-read.density-threshold'))
675 'sparse-read.density-threshold'))
678 srmingapsize = self.ui.configbytes('experimental',
676 srmingapsize = self.ui.configbytes('experimental',
679 'sparse-read.min-gap-size')
677 'sparse-read.min-gap-size')
680 self.svfs.options['with-sparse-read'] = withsparseread
678 self.svfs.options['with-sparse-read'] = withsparseread
681 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
679 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
682 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
680 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
683
681
684 for r in self.requirements:
682 for r in self.requirements:
685 if r.startswith('exp-compression-'):
683 if r.startswith('exp-compression-'):
686 self.svfs.options['compengine'] = r[len('exp-compression-'):]
684 self.svfs.options['compengine'] = r[len('exp-compression-'):]
687
685
688 # TODO move "revlogv2" to openerreqs once finalized.
686 # TODO move "revlogv2" to openerreqs once finalized.
689 if REVLOGV2_REQUIREMENT in self.requirements:
687 if REVLOGV2_REQUIREMENT in self.requirements:
690 self.svfs.options['revlogv2'] = True
688 self.svfs.options['revlogv2'] = True
691
689
692 def _writerequirements(self):
690 def _writerequirements(self):
693 scmutil.writerequires(self.vfs, self.requirements)
691 scmutil.writerequires(self.vfs, self.requirements)
694
692
695 def _checknested(self, path):
693 def _checknested(self, path):
696 """Determine if path is a legal nested repository."""
694 """Determine if path is a legal nested repository."""
697 if not path.startswith(self.root):
695 if not path.startswith(self.root):
698 return False
696 return False
699 subpath = path[len(self.root) + 1:]
697 subpath = path[len(self.root) + 1:]
700 normsubpath = util.pconvert(subpath)
698 normsubpath = util.pconvert(subpath)
701
699
702 # XXX: Checking against the current working copy is wrong in
700 # XXX: Checking against the current working copy is wrong in
703 # the sense that it can reject things like
701 # the sense that it can reject things like
704 #
702 #
705 # $ hg cat -r 10 sub/x.txt
703 # $ hg cat -r 10 sub/x.txt
706 #
704 #
707 # if sub/ is no longer a subrepository in the working copy
705 # if sub/ is no longer a subrepository in the working copy
708 # parent revision.
706 # parent revision.
709 #
707 #
710 # However, it can of course also allow things that would have
708 # However, it can of course also allow things that would have
711 # been rejected before, such as the above cat command if sub/
709 # been rejected before, such as the above cat command if sub/
712 # is a subrepository now, but was a normal directory before.
710 # is a subrepository now, but was a normal directory before.
713 # The old path auditor would have rejected by mistake since it
711 # The old path auditor would have rejected by mistake since it
714 # panics when it sees sub/.hg/.
712 # panics when it sees sub/.hg/.
715 #
713 #
716 # All in all, checking against the working copy seems sensible
714 # All in all, checking against the working copy seems sensible
717 # since we want to prevent access to nested repositories on
715 # since we want to prevent access to nested repositories on
718 # the filesystem *now*.
716 # the filesystem *now*.
719 ctx = self[None]
717 ctx = self[None]
720 parts = util.splitpath(subpath)
718 parts = util.splitpath(subpath)
721 while parts:
719 while parts:
722 prefix = '/'.join(parts)
720 prefix = '/'.join(parts)
723 if prefix in ctx.substate:
721 if prefix in ctx.substate:
724 if prefix == normsubpath:
722 if prefix == normsubpath:
725 return True
723 return True
726 else:
724 else:
727 sub = ctx.sub(prefix)
725 sub = ctx.sub(prefix)
728 return sub.checknested(subpath[len(prefix) + 1:])
726 return sub.checknested(subpath[len(prefix) + 1:])
729 else:
727 else:
730 parts.pop()
728 parts.pop()
731 return False
729 return False
732
730
733 def peer(self):
731 def peer(self):
734 return localpeer(self) # not cached to avoid reference cycle
732 return localpeer(self) # not cached to avoid reference cycle
735
733
736 def unfiltered(self):
734 def unfiltered(self):
737 """Return unfiltered version of the repository
735 """Return unfiltered version of the repository
738
736
739 Intended to be overwritten by filtered repo."""
737 Intended to be overwritten by filtered repo."""
740 return self
738 return self
741
739
742 def filtered(self, name, visibilityexceptions=None):
740 def filtered(self, name, visibilityexceptions=None):
743 """Return a filtered version of a repository"""
741 """Return a filtered version of a repository"""
744 cls = repoview.newtype(self.unfiltered().__class__)
742 cls = repoview.newtype(self.unfiltered().__class__)
745 return cls(self, name, visibilityexceptions)
743 return cls(self, name, visibilityexceptions)
746
744
747 @repofilecache('bookmarks', 'bookmarks.current')
745 @repofilecache('bookmarks', 'bookmarks.current')
748 def _bookmarks(self):
746 def _bookmarks(self):
749 return bookmarks.bmstore(self)
747 return bookmarks.bmstore(self)
750
748
751 @property
749 @property
752 def _activebookmark(self):
750 def _activebookmark(self):
753 return self._bookmarks.active
751 return self._bookmarks.active
754
752
755 # _phasesets depend on changelog. what we need is to call
753 # _phasesets depend on changelog. what we need is to call
756 # _phasecache.invalidate() if '00changelog.i' was changed, but it
754 # _phasecache.invalidate() if '00changelog.i' was changed, but it
757 # can't be easily expressed in filecache mechanism.
755 # can't be easily expressed in filecache mechanism.
758 @storecache('phaseroots', '00changelog.i')
756 @storecache('phaseroots', '00changelog.i')
759 def _phasecache(self):
757 def _phasecache(self):
760 return phases.phasecache(self, self._phasedefaults)
758 return phases.phasecache(self, self._phasedefaults)
761
759
762 @storecache('obsstore')
760 @storecache('obsstore')
763 def obsstore(self):
761 def obsstore(self):
764 return obsolete.makestore(self.ui, self)
762 return obsolete.makestore(self.ui, self)
765
763
766 @storecache('00changelog.i')
764 @storecache('00changelog.i')
767 def changelog(self):
765 def changelog(self):
768 return changelog.changelog(self.svfs,
766 return changelog.changelog(self.svfs,
769 trypending=txnutil.mayhavepending(self.root))
767 trypending=txnutil.mayhavepending(self.root))
770
768
771 def _constructmanifest(self):
769 def _constructmanifest(self):
772 # This is a temporary function while we migrate from manifest to
770 # This is a temporary function while we migrate from manifest to
773 # manifestlog. It allows bundlerepo and unionrepo to intercept the
771 # manifestlog. It allows bundlerepo and unionrepo to intercept the
774 # manifest creation.
772 # manifest creation.
775 return manifest.manifestrevlog(self.svfs)
773 return manifest.manifestrevlog(self.svfs)
776
774
777 @storecache('00manifest.i')
775 @storecache('00manifest.i')
778 def manifestlog(self):
776 def manifestlog(self):
779 return manifest.manifestlog(self.svfs, self)
777 return manifest.manifestlog(self.svfs, self)
780
778
781 @repofilecache('dirstate')
779 @repofilecache('dirstate')
782 def dirstate(self):
780 def dirstate(self):
783 sparsematchfn = lambda: sparse.matcher(self)
781 sparsematchfn = lambda: sparse.matcher(self)
784
782
785 return dirstate.dirstate(self.vfs, self.ui, self.root,
783 return dirstate.dirstate(self.vfs, self.ui, self.root,
786 self._dirstatevalidate, sparsematchfn)
784 self._dirstatevalidate, sparsematchfn)
787
785
788 def _dirstatevalidate(self, node):
786 def _dirstatevalidate(self, node):
789 try:
787 try:
790 self.changelog.rev(node)
788 self.changelog.rev(node)
791 return node
789 return node
792 except error.LookupError:
790 except error.LookupError:
793 if not self._dirstatevalidatewarned:
791 if not self._dirstatevalidatewarned:
794 self._dirstatevalidatewarned = True
792 self._dirstatevalidatewarned = True
795 self.ui.warn(_("warning: ignoring unknown"
793 self.ui.warn(_("warning: ignoring unknown"
796 " working parent %s!\n") % short(node))
794 " working parent %s!\n") % short(node))
797 return nullid
795 return nullid
798
796
799 @repofilecache(narrowspec.FILENAME)
797 @repofilecache(narrowspec.FILENAME)
800 def narrowpats(self):
798 def narrowpats(self):
801 """matcher patterns for this repository's narrowspec
799 """matcher patterns for this repository's narrowspec
802
800
803 A tuple of (includes, excludes).
801 A tuple of (includes, excludes).
804 """
802 """
805 source = self
803 source = self
806 if self.shared():
804 if self.shared():
807 from . import hg
805 from . import hg
808 source = hg.sharedreposource(self)
806 source = hg.sharedreposource(self)
809 return narrowspec.load(source)
807 return narrowspec.load(source)
810
808
811 @repofilecache(narrowspec.FILENAME)
809 @repofilecache(narrowspec.FILENAME)
812 def _narrowmatch(self):
810 def _narrowmatch(self):
813 if changegroup.NARROW_REQUIREMENT not in self.requirements:
811 if changegroup.NARROW_REQUIREMENT not in self.requirements:
814 return matchmod.always(self.root, '')
812 return matchmod.always(self.root, '')
815 include, exclude = self.narrowpats
813 include, exclude = self.narrowpats
816 return narrowspec.match(self.root, include=include, exclude=exclude)
814 return narrowspec.match(self.root, include=include, exclude=exclude)
817
815
818 # TODO(martinvonz): make this property-like instead?
816 # TODO(martinvonz): make this property-like instead?
819 def narrowmatch(self):
817 def narrowmatch(self):
820 return self._narrowmatch
818 return self._narrowmatch
821
819
822 def setnarrowpats(self, newincludes, newexcludes):
820 def setnarrowpats(self, newincludes, newexcludes):
823 target = self
821 target = self
824 if self.shared():
822 if self.shared():
825 from . import hg
823 from . import hg
826 target = hg.sharedreposource(self)
824 target = hg.sharedreposource(self)
827 narrowspec.save(target, newincludes, newexcludes)
825 narrowspec.save(target, newincludes, newexcludes)
828 self.invalidate(clearfilecache=True)
826 self.invalidate(clearfilecache=True)
829
827
830 def __getitem__(self, changeid):
828 def __getitem__(self, changeid):
831 if changeid is None:
829 if changeid is None:
832 return context.workingctx(self)
830 return context.workingctx(self)
833 if isinstance(changeid, context.basectx):
831 if isinstance(changeid, context.basectx):
834 return changeid
832 return changeid
835 if isinstance(changeid, slice):
833 if isinstance(changeid, slice):
836 # wdirrev isn't contiguous so the slice shouldn't include it
834 # wdirrev isn't contiguous so the slice shouldn't include it
837 return [context.changectx(self, i)
835 return [context.changectx(self, i)
838 for i in xrange(*changeid.indices(len(self)))
836 for i in xrange(*changeid.indices(len(self)))
839 if i not in self.changelog.filteredrevs]
837 if i not in self.changelog.filteredrevs]
840 try:
838 try:
841 return context.changectx(self, changeid)
839 return context.changectx(self, changeid)
842 except error.WdirUnsupported:
840 except error.WdirUnsupported:
843 return context.workingctx(self)
841 return context.workingctx(self)
844
842
845 def __contains__(self, changeid):
843 def __contains__(self, changeid):
846 """True if the given changeid exists
844 """True if the given changeid exists
847
845
848 error.LookupError is raised if an ambiguous node specified.
846 error.LookupError is raised if an ambiguous node specified.
849 """
847 """
850 try:
848 try:
851 self[changeid]
849 self[changeid]
852 return True
850 return True
853 except error.RepoLookupError:
851 except error.RepoLookupError:
854 return False
852 return False
855
853
856 def __nonzero__(self):
854 def __nonzero__(self):
857 return True
855 return True
858
856
859 __bool__ = __nonzero__
857 __bool__ = __nonzero__
860
858
861 def __len__(self):
859 def __len__(self):
862 # no need to pay the cost of repoview.changelog
860 # no need to pay the cost of repoview.changelog
863 unfi = self.unfiltered()
861 unfi = self.unfiltered()
864 return len(unfi.changelog)
862 return len(unfi.changelog)
865
863
866 def __iter__(self):
864 def __iter__(self):
867 return iter(self.changelog)
865 return iter(self.changelog)
868
866
869 def revs(self, expr, *args):
867 def revs(self, expr, *args):
870 '''Find revisions matching a revset.
868 '''Find revisions matching a revset.
871
869
872 The revset is specified as a string ``expr`` that may contain
870 The revset is specified as a string ``expr`` that may contain
873 %-formatting to escape certain types. See ``revsetlang.formatspec``.
871 %-formatting to escape certain types. See ``revsetlang.formatspec``.
874
872
875 Revset aliases from the configuration are not expanded. To expand
873 Revset aliases from the configuration are not expanded. To expand
876 user aliases, consider calling ``scmutil.revrange()`` or
874 user aliases, consider calling ``scmutil.revrange()`` or
877 ``repo.anyrevs([expr], user=True)``.
875 ``repo.anyrevs([expr], user=True)``.
878
876
879 Returns a revset.abstractsmartset, which is a list-like interface
877 Returns a revset.abstractsmartset, which is a list-like interface
880 that contains integer revisions.
878 that contains integer revisions.
881 '''
879 '''
882 expr = revsetlang.formatspec(expr, *args)
880 expr = revsetlang.formatspec(expr, *args)
883 m = revset.match(None, expr)
881 m = revset.match(None, expr)
884 return m(self)
882 return m(self)
885
883
886 def set(self, expr, *args):
884 def set(self, expr, *args):
887 '''Find revisions matching a revset and emit changectx instances.
885 '''Find revisions matching a revset and emit changectx instances.
888
886
889 This is a convenience wrapper around ``revs()`` that iterates the
887 This is a convenience wrapper around ``revs()`` that iterates the
890 result and is a generator of changectx instances.
888 result and is a generator of changectx instances.
891
889
892 Revset aliases from the configuration are not expanded. To expand
890 Revset aliases from the configuration are not expanded. To expand
893 user aliases, consider calling ``scmutil.revrange()``.
891 user aliases, consider calling ``scmutil.revrange()``.
894 '''
892 '''
895 for r in self.revs(expr, *args):
893 for r in self.revs(expr, *args):
896 yield self[r]
894 yield self[r]
897
895
898 def anyrevs(self, specs, user=False, localalias=None):
896 def anyrevs(self, specs, user=False, localalias=None):
899 '''Find revisions matching one of the given revsets.
897 '''Find revisions matching one of the given revsets.
900
898
901 Revset aliases from the configuration are not expanded by default. To
899 Revset aliases from the configuration are not expanded by default. To
902 expand user aliases, specify ``user=True``. To provide some local
900 expand user aliases, specify ``user=True``. To provide some local
903 definitions overriding user aliases, set ``localalias`` to
901 definitions overriding user aliases, set ``localalias`` to
904 ``{name: definitionstring}``.
902 ``{name: definitionstring}``.
905 '''
903 '''
906 if user:
904 if user:
907 m = revset.matchany(self.ui, specs,
905 m = revset.matchany(self.ui, specs,
908 lookup=revset.lookupfn(self),
906 lookup=revset.lookupfn(self),
909 localalias=localalias)
907 localalias=localalias)
910 else:
908 else:
911 m = revset.matchany(None, specs, localalias=localalias)
909 m = revset.matchany(None, specs, localalias=localalias)
912 return m(self)
910 return m(self)
913
911
914 def url(self):
912 def url(self):
915 return 'file:' + self.root
913 return 'file:' + self.root
916
914
917 def hook(self, name, throw=False, **args):
915 def hook(self, name, throw=False, **args):
918 """Call a hook, passing this repo instance.
916 """Call a hook, passing this repo instance.
919
917
920 This a convenience method to aid invoking hooks. Extensions likely
918 This a convenience method to aid invoking hooks. Extensions likely
921 won't call this unless they have registered a custom hook or are
919 won't call this unless they have registered a custom hook or are
922 replacing code that is expected to call a hook.
920 replacing code that is expected to call a hook.
923 """
921 """
924 return hook.hook(self.ui, self, name, throw, **args)
922 return hook.hook(self.ui, self, name, throw, **args)
925
923
926 @filteredpropertycache
924 @filteredpropertycache
927 def _tagscache(self):
925 def _tagscache(self):
928 '''Returns a tagscache object that contains various tags related
926 '''Returns a tagscache object that contains various tags related
929 caches.'''
927 caches.'''
930
928
931 # This simplifies its cache management by having one decorated
929 # This simplifies its cache management by having one decorated
932 # function (this one) and the rest simply fetch things from it.
930 # function (this one) and the rest simply fetch things from it.
933 class tagscache(object):
931 class tagscache(object):
934 def __init__(self):
932 def __init__(self):
935 # These two define the set of tags for this repository. tags
933 # These two define the set of tags for this repository. tags
936 # maps tag name to node; tagtypes maps tag name to 'global' or
934 # maps tag name to node; tagtypes maps tag name to 'global' or
937 # 'local'. (Global tags are defined by .hgtags across all
935 # 'local'. (Global tags are defined by .hgtags across all
938 # heads, and local tags are defined in .hg/localtags.)
936 # heads, and local tags are defined in .hg/localtags.)
939 # They constitute the in-memory cache of tags.
937 # They constitute the in-memory cache of tags.
940 self.tags = self.tagtypes = None
938 self.tags = self.tagtypes = None
941
939
942 self.nodetagscache = self.tagslist = None
940 self.nodetagscache = self.tagslist = None
943
941
944 cache = tagscache()
942 cache = tagscache()
945 cache.tags, cache.tagtypes = self._findtags()
943 cache.tags, cache.tagtypes = self._findtags()
946
944
947 return cache
945 return cache
948
946
949 def tags(self):
947 def tags(self):
950 '''return a mapping of tag to node'''
948 '''return a mapping of tag to node'''
951 t = {}
949 t = {}
952 if self.changelog.filteredrevs:
950 if self.changelog.filteredrevs:
953 tags, tt = self._findtags()
951 tags, tt = self._findtags()
954 else:
952 else:
955 tags = self._tagscache.tags
953 tags = self._tagscache.tags
956 for k, v in tags.iteritems():
954 for k, v in tags.iteritems():
957 try:
955 try:
958 # ignore tags to unknown nodes
956 # ignore tags to unknown nodes
959 self.changelog.rev(v)
957 self.changelog.rev(v)
960 t[k] = v
958 t[k] = v
961 except (error.LookupError, ValueError):
959 except (error.LookupError, ValueError):
962 pass
960 pass
963 return t
961 return t
964
962
965 def _findtags(self):
963 def _findtags(self):
966 '''Do the hard work of finding tags. Return a pair of dicts
964 '''Do the hard work of finding tags. Return a pair of dicts
967 (tags, tagtypes) where tags maps tag name to node, and tagtypes
965 (tags, tagtypes) where tags maps tag name to node, and tagtypes
968 maps tag name to a string like \'global\' or \'local\'.
966 maps tag name to a string like \'global\' or \'local\'.
969 Subclasses or extensions are free to add their own tags, but
967 Subclasses or extensions are free to add their own tags, but
970 should be aware that the returned dicts will be retained for the
968 should be aware that the returned dicts will be retained for the
971 duration of the localrepo object.'''
969 duration of the localrepo object.'''
972
970
973 # XXX what tagtype should subclasses/extensions use? Currently
971 # XXX what tagtype should subclasses/extensions use? Currently
974 # mq and bookmarks add tags, but do not set the tagtype at all.
972 # mq and bookmarks add tags, but do not set the tagtype at all.
975 # Should each extension invent its own tag type? Should there
973 # Should each extension invent its own tag type? Should there
976 # be one tagtype for all such "virtual" tags? Or is the status
974 # be one tagtype for all such "virtual" tags? Or is the status
977 # quo fine?
975 # quo fine?
978
976
979
977
980 # map tag name to (node, hist)
978 # map tag name to (node, hist)
981 alltags = tagsmod.findglobaltags(self.ui, self)
979 alltags = tagsmod.findglobaltags(self.ui, self)
982 # map tag name to tag type
980 # map tag name to tag type
983 tagtypes = dict((tag, 'global') for tag in alltags)
981 tagtypes = dict((tag, 'global') for tag in alltags)
984
982
985 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
983 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
986
984
987 # Build the return dicts. Have to re-encode tag names because
985 # Build the return dicts. Have to re-encode tag names because
988 # the tags module always uses UTF-8 (in order not to lose info
986 # the tags module always uses UTF-8 (in order not to lose info
989 # writing to the cache), but the rest of Mercurial wants them in
987 # writing to the cache), but the rest of Mercurial wants them in
990 # local encoding.
988 # local encoding.
991 tags = {}
989 tags = {}
992 for (name, (node, hist)) in alltags.iteritems():
990 for (name, (node, hist)) in alltags.iteritems():
993 if node != nullid:
991 if node != nullid:
994 tags[encoding.tolocal(name)] = node
992 tags[encoding.tolocal(name)] = node
995 tags['tip'] = self.changelog.tip()
993 tags['tip'] = self.changelog.tip()
996 tagtypes = dict([(encoding.tolocal(name), value)
994 tagtypes = dict([(encoding.tolocal(name), value)
997 for (name, value) in tagtypes.iteritems()])
995 for (name, value) in tagtypes.iteritems()])
998 return (tags, tagtypes)
996 return (tags, tagtypes)
999
997
1000 def tagtype(self, tagname):
998 def tagtype(self, tagname):
1001 '''
999 '''
1002 return the type of the given tag. result can be:
1000 return the type of the given tag. result can be:
1003
1001
1004 'local' : a local tag
1002 'local' : a local tag
1005 'global' : a global tag
1003 'global' : a global tag
1006 None : tag does not exist
1004 None : tag does not exist
1007 '''
1005 '''
1008
1006
1009 return self._tagscache.tagtypes.get(tagname)
1007 return self._tagscache.tagtypes.get(tagname)
1010
1008
1011 def tagslist(self):
1009 def tagslist(self):
1012 '''return a list of tags ordered by revision'''
1010 '''return a list of tags ordered by revision'''
1013 if not self._tagscache.tagslist:
1011 if not self._tagscache.tagslist:
1014 l = []
1012 l = []
1015 for t, n in self.tags().iteritems():
1013 for t, n in self.tags().iteritems():
1016 l.append((self.changelog.rev(n), t, n))
1014 l.append((self.changelog.rev(n), t, n))
1017 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1015 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1018
1016
1019 return self._tagscache.tagslist
1017 return self._tagscache.tagslist
1020
1018
1021 def nodetags(self, node):
1019 def nodetags(self, node):
1022 '''return the tags associated with a node'''
1020 '''return the tags associated with a node'''
1023 if not self._tagscache.nodetagscache:
1021 if not self._tagscache.nodetagscache:
1024 nodetagscache = {}
1022 nodetagscache = {}
1025 for t, n in self._tagscache.tags.iteritems():
1023 for t, n in self._tagscache.tags.iteritems():
1026 nodetagscache.setdefault(n, []).append(t)
1024 nodetagscache.setdefault(n, []).append(t)
1027 for tags in nodetagscache.itervalues():
1025 for tags in nodetagscache.itervalues():
1028 tags.sort()
1026 tags.sort()
1029 self._tagscache.nodetagscache = nodetagscache
1027 self._tagscache.nodetagscache = nodetagscache
1030 return self._tagscache.nodetagscache.get(node, [])
1028 return self._tagscache.nodetagscache.get(node, [])
1031
1029
1032 def nodebookmarks(self, node):
1030 def nodebookmarks(self, node):
1033 """return the list of bookmarks pointing to the specified node"""
1031 """return the list of bookmarks pointing to the specified node"""
1034 marks = []
1032 marks = []
1035 for bookmark, n in self._bookmarks.iteritems():
1033 for bookmark, n in self._bookmarks.iteritems():
1036 if n == node:
1034 if n == node:
1037 marks.append(bookmark)
1035 marks.append(bookmark)
1038 return sorted(marks)
1036 return sorted(marks)
1039
1037
1040 def branchmap(self):
1038 def branchmap(self):
1041 '''returns a dictionary {branch: [branchheads]} with branchheads
1039 '''returns a dictionary {branch: [branchheads]} with branchheads
1042 ordered by increasing revision number'''
1040 ordered by increasing revision number'''
1043 branchmap.updatecache(self)
1041 branchmap.updatecache(self)
1044 return self._branchcaches[self.filtername]
1042 return self._branchcaches[self.filtername]
1045
1043
1046 @unfilteredmethod
1044 @unfilteredmethod
1047 def revbranchcache(self):
1045 def revbranchcache(self):
1048 if not self._revbranchcache:
1046 if not self._revbranchcache:
1049 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1047 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1050 return self._revbranchcache
1048 return self._revbranchcache
1051
1049
1052 def branchtip(self, branch, ignoremissing=False):
1050 def branchtip(self, branch, ignoremissing=False):
1053 '''return the tip node for a given branch
1051 '''return the tip node for a given branch
1054
1052
1055 If ignoremissing is True, then this method will not raise an error.
1053 If ignoremissing is True, then this method will not raise an error.
1056 This is helpful for callers that only expect None for a missing branch
1054 This is helpful for callers that only expect None for a missing branch
1057 (e.g. namespace).
1055 (e.g. namespace).
1058
1056
1059 '''
1057 '''
1060 try:
1058 try:
1061 return self.branchmap().branchtip(branch)
1059 return self.branchmap().branchtip(branch)
1062 except KeyError:
1060 except KeyError:
1063 if not ignoremissing:
1061 if not ignoremissing:
1064 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1062 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1065 else:
1063 else:
1066 pass
1064 pass
1067
1065
1068 def lookup(self, key):
1066 def lookup(self, key):
1069 return scmutil.revsymbol(self, key).node()
1067 return scmutil.revsymbol(self, key).node()
1070
1068
1071 def lookupbranch(self, key):
1069 def lookupbranch(self, key):
1072 if key in self.branchmap():
1070 if key in self.branchmap():
1073 return key
1071 return key
1074
1072
1075 return scmutil.revsymbol(self, key).branch()
1073 return scmutil.revsymbol(self, key).branch()
1076
1074
1077 def known(self, nodes):
1075 def known(self, nodes):
1078 cl = self.changelog
1076 cl = self.changelog
1079 nm = cl.nodemap
1077 nm = cl.nodemap
1080 filtered = cl.filteredrevs
1078 filtered = cl.filteredrevs
1081 result = []
1079 result = []
1082 for n in nodes:
1080 for n in nodes:
1083 r = nm.get(n)
1081 r = nm.get(n)
1084 resp = not (r is None or r in filtered)
1082 resp = not (r is None or r in filtered)
1085 result.append(resp)
1083 result.append(resp)
1086 return result
1084 return result
1087
1085
1088 def local(self):
1086 def local(self):
1089 return self
1087 return self
1090
1088
1091 def publishing(self):
1089 def publishing(self):
1092 # it's safe (and desirable) to trust the publish flag unconditionally
1090 # it's safe (and desirable) to trust the publish flag unconditionally
1093 # so that we don't finalize changes shared between users via ssh or nfs
1091 # so that we don't finalize changes shared between users via ssh or nfs
1094 return self.ui.configbool('phases', 'publish', untrusted=True)
1092 return self.ui.configbool('phases', 'publish', untrusted=True)
1095
1093
1096 def cancopy(self):
1094 def cancopy(self):
1097 # so statichttprepo's override of local() works
1095 # so statichttprepo's override of local() works
1098 if not self.local():
1096 if not self.local():
1099 return False
1097 return False
1100 if not self.publishing():
1098 if not self.publishing():
1101 return True
1099 return True
1102 # if publishing we can't copy if there is filtered content
1100 # if publishing we can't copy if there is filtered content
1103 return not self.filtered('visible').changelog.filteredrevs
1101 return not self.filtered('visible').changelog.filteredrevs
1104
1102
1105 def shared(self):
1103 def shared(self):
1106 '''the type of shared repository (None if not shared)'''
1104 '''the type of shared repository (None if not shared)'''
1107 if self.sharedpath != self.path:
1105 if self.sharedpath != self.path:
1108 return 'store'
1106 return 'store'
1109 return None
1107 return None
1110
1108
1111 def wjoin(self, f, *insidef):
1109 def wjoin(self, f, *insidef):
1112 return self.vfs.reljoin(self.root, f, *insidef)
1110 return self.vfs.reljoin(self.root, f, *insidef)
1113
1111
1114 def file(self, f):
1112 def file(self, f):
1115 if f[0] == '/':
1113 if f[0] == '/':
1116 f = f[1:]
1114 f = f[1:]
1117 return filelog.filelog(self.svfs, f)
1115 return filelog.filelog(self.svfs, f)
1118
1116
1119 def setparents(self, p1, p2=nullid):
1117 def setparents(self, p1, p2=nullid):
1120 with self.dirstate.parentchange():
1118 with self.dirstate.parentchange():
1121 copies = self.dirstate.setparents(p1, p2)
1119 copies = self.dirstate.setparents(p1, p2)
1122 pctx = self[p1]
1120 pctx = self[p1]
1123 if copies:
1121 if copies:
1124 # Adjust copy records, the dirstate cannot do it, it
1122 # Adjust copy records, the dirstate cannot do it, it
1125 # requires access to parents manifests. Preserve them
1123 # requires access to parents manifests. Preserve them
1126 # only for entries added to first parent.
1124 # only for entries added to first parent.
1127 for f in copies:
1125 for f in copies:
1128 if f not in pctx and copies[f] in pctx:
1126 if f not in pctx and copies[f] in pctx:
1129 self.dirstate.copy(copies[f], f)
1127 self.dirstate.copy(copies[f], f)
1130 if p2 == nullid:
1128 if p2 == nullid:
1131 for f, s in sorted(self.dirstate.copies().items()):
1129 for f, s in sorted(self.dirstate.copies().items()):
1132 if f not in pctx and s not in pctx:
1130 if f not in pctx and s not in pctx:
1133 self.dirstate.copy(None, f)
1131 self.dirstate.copy(None, f)
1134
1132
1135 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1133 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1136 """changeid can be a changeset revision, node, or tag.
1134 """changeid can be a changeset revision, node, or tag.
1137 fileid can be a file revision or node."""
1135 fileid can be a file revision or node."""
1138 return context.filectx(self, path, changeid, fileid,
1136 return context.filectx(self, path, changeid, fileid,
1139 changectx=changectx)
1137 changectx=changectx)
1140
1138
1141 def getcwd(self):
1139 def getcwd(self):
1142 return self.dirstate.getcwd()
1140 return self.dirstate.getcwd()
1143
1141
1144 def pathto(self, f, cwd=None):
1142 def pathto(self, f, cwd=None):
1145 return self.dirstate.pathto(f, cwd)
1143 return self.dirstate.pathto(f, cwd)
1146
1144
1147 def _loadfilter(self, filter):
1145 def _loadfilter(self, filter):
1148 if filter not in self._filterpats:
1146 if filter not in self._filterpats:
1149 l = []
1147 l = []
1150 for pat, cmd in self.ui.configitems(filter):
1148 for pat, cmd in self.ui.configitems(filter):
1151 if cmd == '!':
1149 if cmd == '!':
1152 continue
1150 continue
1153 mf = matchmod.match(self.root, '', [pat])
1151 mf = matchmod.match(self.root, '', [pat])
1154 fn = None
1152 fn = None
1155 params = cmd
1153 params = cmd
1156 for name, filterfn in self._datafilters.iteritems():
1154 for name, filterfn in self._datafilters.iteritems():
1157 if cmd.startswith(name):
1155 if cmd.startswith(name):
1158 fn = filterfn
1156 fn = filterfn
1159 params = cmd[len(name):].lstrip()
1157 params = cmd[len(name):].lstrip()
1160 break
1158 break
1161 if not fn:
1159 if not fn:
1162 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1160 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1163 # Wrap old filters not supporting keyword arguments
1161 # Wrap old filters not supporting keyword arguments
1164 if not pycompat.getargspec(fn)[2]:
1162 if not pycompat.getargspec(fn)[2]:
1165 oldfn = fn
1163 oldfn = fn
1166 fn = lambda s, c, **kwargs: oldfn(s, c)
1164 fn = lambda s, c, **kwargs: oldfn(s, c)
1167 l.append((mf, fn, params))
1165 l.append((mf, fn, params))
1168 self._filterpats[filter] = l
1166 self._filterpats[filter] = l
1169 return self._filterpats[filter]
1167 return self._filterpats[filter]
1170
1168
1171 def _filter(self, filterpats, filename, data):
1169 def _filter(self, filterpats, filename, data):
1172 for mf, fn, cmd in filterpats:
1170 for mf, fn, cmd in filterpats:
1173 if mf(filename):
1171 if mf(filename):
1174 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1172 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1175 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1173 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1176 break
1174 break
1177
1175
1178 return data
1176 return data
1179
1177
1180 @unfilteredpropertycache
1178 @unfilteredpropertycache
1181 def _encodefilterpats(self):
1179 def _encodefilterpats(self):
1182 return self._loadfilter('encode')
1180 return self._loadfilter('encode')
1183
1181
1184 @unfilteredpropertycache
1182 @unfilteredpropertycache
1185 def _decodefilterpats(self):
1183 def _decodefilterpats(self):
1186 return self._loadfilter('decode')
1184 return self._loadfilter('decode')
1187
1185
1188 def adddatafilter(self, name, filter):
1186 def adddatafilter(self, name, filter):
1189 self._datafilters[name] = filter
1187 self._datafilters[name] = filter
1190
1188
1191 def wread(self, filename):
1189 def wread(self, filename):
1192 if self.wvfs.islink(filename):
1190 if self.wvfs.islink(filename):
1193 data = self.wvfs.readlink(filename)
1191 data = self.wvfs.readlink(filename)
1194 else:
1192 else:
1195 data = self.wvfs.read(filename)
1193 data = self.wvfs.read(filename)
1196 return self._filter(self._encodefilterpats, filename, data)
1194 return self._filter(self._encodefilterpats, filename, data)
1197
1195
1198 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1196 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1199 """write ``data`` into ``filename`` in the working directory
1197 """write ``data`` into ``filename`` in the working directory
1200
1198
1201 This returns length of written (maybe decoded) data.
1199 This returns length of written (maybe decoded) data.
1202 """
1200 """
1203 data = self._filter(self._decodefilterpats, filename, data)
1201 data = self._filter(self._decodefilterpats, filename, data)
1204 if 'l' in flags:
1202 if 'l' in flags:
1205 self.wvfs.symlink(data, filename)
1203 self.wvfs.symlink(data, filename)
1206 else:
1204 else:
1207 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1205 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1208 **kwargs)
1206 **kwargs)
1209 if 'x' in flags:
1207 if 'x' in flags:
1210 self.wvfs.setflags(filename, False, True)
1208 self.wvfs.setflags(filename, False, True)
1211 else:
1209 else:
1212 self.wvfs.setflags(filename, False, False)
1210 self.wvfs.setflags(filename, False, False)
1213 return len(data)
1211 return len(data)
1214
1212
1215 def wwritedata(self, filename, data):
1213 def wwritedata(self, filename, data):
1216 return self._filter(self._decodefilterpats, filename, data)
1214 return self._filter(self._decodefilterpats, filename, data)
1217
1215
1218 def currenttransaction(self):
1216 def currenttransaction(self):
1219 """return the current transaction or None if non exists"""
1217 """return the current transaction or None if non exists"""
1220 if self._transref:
1218 if self._transref:
1221 tr = self._transref()
1219 tr = self._transref()
1222 else:
1220 else:
1223 tr = None
1221 tr = None
1224
1222
1225 if tr and tr.running():
1223 if tr and tr.running():
1226 return tr
1224 return tr
1227 return None
1225 return None
1228
1226
1229 def transaction(self, desc, report=None):
1227 def transaction(self, desc, report=None):
1230 if (self.ui.configbool('devel', 'all-warnings')
1228 if (self.ui.configbool('devel', 'all-warnings')
1231 or self.ui.configbool('devel', 'check-locks')):
1229 or self.ui.configbool('devel', 'check-locks')):
1232 if self._currentlock(self._lockref) is None:
1230 if self._currentlock(self._lockref) is None:
1233 raise error.ProgrammingError('transaction requires locking')
1231 raise error.ProgrammingError('transaction requires locking')
1234 tr = self.currenttransaction()
1232 tr = self.currenttransaction()
1235 if tr is not None:
1233 if tr is not None:
1236 return tr.nest(name=desc)
1234 return tr.nest(name=desc)
1237
1235
1238 # abort here if the journal already exists
1236 # abort here if the journal already exists
1239 if self.svfs.exists("journal"):
1237 if self.svfs.exists("journal"):
1240 raise error.RepoError(
1238 raise error.RepoError(
1241 _("abandoned transaction found"),
1239 _("abandoned transaction found"),
1242 hint=_("run 'hg recover' to clean up transaction"))
1240 hint=_("run 'hg recover' to clean up transaction"))
1243
1241
1244 idbase = "%.40f#%f" % (random.random(), time.time())
1242 idbase = "%.40f#%f" % (random.random(), time.time())
1245 ha = hex(hashlib.sha1(idbase).digest())
1243 ha = hex(hashlib.sha1(idbase).digest())
1246 txnid = 'TXN:' + ha
1244 txnid = 'TXN:' + ha
1247 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1245 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1248
1246
1249 self._writejournal(desc)
1247 self._writejournal(desc)
1250 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1248 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1251 if report:
1249 if report:
1252 rp = report
1250 rp = report
1253 else:
1251 else:
1254 rp = self.ui.warn
1252 rp = self.ui.warn
1255 vfsmap = {'plain': self.vfs} # root of .hg/
1253 vfsmap = {'plain': self.vfs} # root of .hg/
1256 # we must avoid cyclic reference between repo and transaction.
1254 # we must avoid cyclic reference between repo and transaction.
1257 reporef = weakref.ref(self)
1255 reporef = weakref.ref(self)
1258 # Code to track tag movement
1256 # Code to track tag movement
1259 #
1257 #
1260 # Since tags are all handled as file content, it is actually quite hard
1258 # Since tags are all handled as file content, it is actually quite hard
1261 # to track these movement from a code perspective. So we fallback to a
1259 # to track these movement from a code perspective. So we fallback to a
1262 # tracking at the repository level. One could envision to track changes
1260 # tracking at the repository level. One could envision to track changes
1263 # to the '.hgtags' file through changegroup apply but that fails to
1261 # to the '.hgtags' file through changegroup apply but that fails to
1264 # cope with case where transaction expose new heads without changegroup
1262 # cope with case where transaction expose new heads without changegroup
1265 # being involved (eg: phase movement).
1263 # being involved (eg: phase movement).
1266 #
1264 #
1267 # For now, We gate the feature behind a flag since this likely comes
1265 # For now, We gate the feature behind a flag since this likely comes
1268 # with performance impacts. The current code run more often than needed
1266 # with performance impacts. The current code run more often than needed
1269 # and do not use caches as much as it could. The current focus is on
1267 # and do not use caches as much as it could. The current focus is on
1270 # the behavior of the feature so we disable it by default. The flag
1268 # the behavior of the feature so we disable it by default. The flag
1271 # will be removed when we are happy with the performance impact.
1269 # will be removed when we are happy with the performance impact.
1272 #
1270 #
1273 # Once this feature is no longer experimental move the following
1271 # Once this feature is no longer experimental move the following
1274 # documentation to the appropriate help section:
1272 # documentation to the appropriate help section:
1275 #
1273 #
1276 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1274 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1277 # tags (new or changed or deleted tags). In addition the details of
1275 # tags (new or changed or deleted tags). In addition the details of
1278 # these changes are made available in a file at:
1276 # these changes are made available in a file at:
1279 # ``REPOROOT/.hg/changes/tags.changes``.
1277 # ``REPOROOT/.hg/changes/tags.changes``.
1280 # Make sure you check for HG_TAG_MOVED before reading that file as it
1278 # Make sure you check for HG_TAG_MOVED before reading that file as it
1281 # might exist from a previous transaction even if no tag were touched
1279 # might exist from a previous transaction even if no tag were touched
1282 # in this one. Changes are recorded in a line base format::
1280 # in this one. Changes are recorded in a line base format::
1283 #
1281 #
1284 # <action> <hex-node> <tag-name>\n
1282 # <action> <hex-node> <tag-name>\n
1285 #
1283 #
1286 # Actions are defined as follow:
1284 # Actions are defined as follow:
1287 # "-R": tag is removed,
1285 # "-R": tag is removed,
1288 # "+A": tag is added,
1286 # "+A": tag is added,
1289 # "-M": tag is moved (old value),
1287 # "-M": tag is moved (old value),
1290 # "+M": tag is moved (new value),
1288 # "+M": tag is moved (new value),
1291 tracktags = lambda x: None
1289 tracktags = lambda x: None
1292 # experimental config: experimental.hook-track-tags
1290 # experimental config: experimental.hook-track-tags
1293 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1291 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1294 if desc != 'strip' and shouldtracktags:
1292 if desc != 'strip' and shouldtracktags:
1295 oldheads = self.changelog.headrevs()
1293 oldheads = self.changelog.headrevs()
1296 def tracktags(tr2):
1294 def tracktags(tr2):
1297 repo = reporef()
1295 repo = reporef()
1298 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1296 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1299 newheads = repo.changelog.headrevs()
1297 newheads = repo.changelog.headrevs()
1300 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1298 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1301 # notes: we compare lists here.
1299 # notes: we compare lists here.
1302 # As we do it only once buiding set would not be cheaper
1300 # As we do it only once buiding set would not be cheaper
1303 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1301 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1304 if changes:
1302 if changes:
1305 tr2.hookargs['tag_moved'] = '1'
1303 tr2.hookargs['tag_moved'] = '1'
1306 with repo.vfs('changes/tags.changes', 'w',
1304 with repo.vfs('changes/tags.changes', 'w',
1307 atomictemp=True) as changesfile:
1305 atomictemp=True) as changesfile:
1308 # note: we do not register the file to the transaction
1306 # note: we do not register the file to the transaction
1309 # because we needs it to still exist on the transaction
1307 # because we needs it to still exist on the transaction
1310 # is close (for txnclose hooks)
1308 # is close (for txnclose hooks)
1311 tagsmod.writediff(changesfile, changes)
1309 tagsmod.writediff(changesfile, changes)
1312 def validate(tr2):
1310 def validate(tr2):
1313 """will run pre-closing hooks"""
1311 """will run pre-closing hooks"""
1314 # XXX the transaction API is a bit lacking here so we take a hacky
1312 # XXX the transaction API is a bit lacking here so we take a hacky
1315 # path for now
1313 # path for now
1316 #
1314 #
1317 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1315 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1318 # dict is copied before these run. In addition we needs the data
1316 # dict is copied before these run. In addition we needs the data
1319 # available to in memory hooks too.
1317 # available to in memory hooks too.
1320 #
1318 #
1321 # Moreover, we also need to make sure this runs before txnclose
1319 # Moreover, we also need to make sure this runs before txnclose
1322 # hooks and there is no "pending" mechanism that would execute
1320 # hooks and there is no "pending" mechanism that would execute
1323 # logic only if hooks are about to run.
1321 # logic only if hooks are about to run.
1324 #
1322 #
1325 # Fixing this limitation of the transaction is also needed to track
1323 # Fixing this limitation of the transaction is also needed to track
1326 # other families of changes (bookmarks, phases, obsolescence).
1324 # other families of changes (bookmarks, phases, obsolescence).
1327 #
1325 #
1328 # This will have to be fixed before we remove the experimental
1326 # This will have to be fixed before we remove the experimental
1329 # gating.
1327 # gating.
1330 tracktags(tr2)
1328 tracktags(tr2)
1331 repo = reporef()
1329 repo = reporef()
1332 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1330 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1333 scmutil.enforcesinglehead(repo, tr2, desc)
1331 scmutil.enforcesinglehead(repo, tr2, desc)
1334 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1332 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1335 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1333 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1336 args = tr.hookargs.copy()
1334 args = tr.hookargs.copy()
1337 args.update(bookmarks.preparehookargs(name, old, new))
1335 args.update(bookmarks.preparehookargs(name, old, new))
1338 repo.hook('pretxnclose-bookmark', throw=True,
1336 repo.hook('pretxnclose-bookmark', throw=True,
1339 txnname=desc,
1337 txnname=desc,
1340 **pycompat.strkwargs(args))
1338 **pycompat.strkwargs(args))
1341 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1339 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1342 cl = repo.unfiltered().changelog
1340 cl = repo.unfiltered().changelog
1343 for rev, (old, new) in tr.changes['phases'].items():
1341 for rev, (old, new) in tr.changes['phases'].items():
1344 args = tr.hookargs.copy()
1342 args = tr.hookargs.copy()
1345 node = hex(cl.node(rev))
1343 node = hex(cl.node(rev))
1346 args.update(phases.preparehookargs(node, old, new))
1344 args.update(phases.preparehookargs(node, old, new))
1347 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1345 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1348 **pycompat.strkwargs(args))
1346 **pycompat.strkwargs(args))
1349
1347
1350 repo.hook('pretxnclose', throw=True,
1348 repo.hook('pretxnclose', throw=True,
1351 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1349 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1352 def releasefn(tr, success):
1350 def releasefn(tr, success):
1353 repo = reporef()
1351 repo = reporef()
1354 if success:
1352 if success:
1355 # this should be explicitly invoked here, because
1353 # this should be explicitly invoked here, because
1356 # in-memory changes aren't written out at closing
1354 # in-memory changes aren't written out at closing
1357 # transaction, if tr.addfilegenerator (via
1355 # transaction, if tr.addfilegenerator (via
1358 # dirstate.write or so) isn't invoked while
1356 # dirstate.write or so) isn't invoked while
1359 # transaction running
1357 # transaction running
1360 repo.dirstate.write(None)
1358 repo.dirstate.write(None)
1361 else:
1359 else:
1362 # discard all changes (including ones already written
1360 # discard all changes (including ones already written
1363 # out) in this transaction
1361 # out) in this transaction
1364 repo.dirstate.restorebackup(None, 'journal.dirstate')
1362 repo.dirstate.restorebackup(None, 'journal.dirstate')
1365
1363
1366 repo.invalidate(clearfilecache=True)
1364 repo.invalidate(clearfilecache=True)
1367
1365
1368 tr = transaction.transaction(rp, self.svfs, vfsmap,
1366 tr = transaction.transaction(rp, self.svfs, vfsmap,
1369 "journal",
1367 "journal",
1370 "undo",
1368 "undo",
1371 aftertrans(renames),
1369 aftertrans(renames),
1372 self.store.createmode,
1370 self.store.createmode,
1373 validator=validate,
1371 validator=validate,
1374 releasefn=releasefn,
1372 releasefn=releasefn,
1375 checkambigfiles=_cachedfiles,
1373 checkambigfiles=_cachedfiles,
1376 name=desc)
1374 name=desc)
1377 tr.changes['revs'] = xrange(0, 0)
1375 tr.changes['revs'] = xrange(0, 0)
1378 tr.changes['obsmarkers'] = set()
1376 tr.changes['obsmarkers'] = set()
1379 tr.changes['phases'] = {}
1377 tr.changes['phases'] = {}
1380 tr.changes['bookmarks'] = {}
1378 tr.changes['bookmarks'] = {}
1381
1379
1382 tr.hookargs['txnid'] = txnid
1380 tr.hookargs['txnid'] = txnid
1383 # note: writing the fncache only during finalize mean that the file is
1381 # note: writing the fncache only during finalize mean that the file is
1384 # outdated when running hooks. As fncache is used for streaming clone,
1382 # outdated when running hooks. As fncache is used for streaming clone,
1385 # this is not expected to break anything that happen during the hooks.
1383 # this is not expected to break anything that happen during the hooks.
1386 tr.addfinalize('flush-fncache', self.store.write)
1384 tr.addfinalize('flush-fncache', self.store.write)
1387 def txnclosehook(tr2):
1385 def txnclosehook(tr2):
1388 """To be run if transaction is successful, will schedule a hook run
1386 """To be run if transaction is successful, will schedule a hook run
1389 """
1387 """
1390 # Don't reference tr2 in hook() so we don't hold a reference.
1388 # Don't reference tr2 in hook() so we don't hold a reference.
1391 # This reduces memory consumption when there are multiple
1389 # This reduces memory consumption when there are multiple
1392 # transactions per lock. This can likely go away if issue5045
1390 # transactions per lock. This can likely go away if issue5045
1393 # fixes the function accumulation.
1391 # fixes the function accumulation.
1394 hookargs = tr2.hookargs
1392 hookargs = tr2.hookargs
1395
1393
1396 def hookfunc():
1394 def hookfunc():
1397 repo = reporef()
1395 repo = reporef()
1398 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1396 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1399 bmchanges = sorted(tr.changes['bookmarks'].items())
1397 bmchanges = sorted(tr.changes['bookmarks'].items())
1400 for name, (old, new) in bmchanges:
1398 for name, (old, new) in bmchanges:
1401 args = tr.hookargs.copy()
1399 args = tr.hookargs.copy()
1402 args.update(bookmarks.preparehookargs(name, old, new))
1400 args.update(bookmarks.preparehookargs(name, old, new))
1403 repo.hook('txnclose-bookmark', throw=False,
1401 repo.hook('txnclose-bookmark', throw=False,
1404 txnname=desc, **pycompat.strkwargs(args))
1402 txnname=desc, **pycompat.strkwargs(args))
1405
1403
1406 if hook.hashook(repo.ui, 'txnclose-phase'):
1404 if hook.hashook(repo.ui, 'txnclose-phase'):
1407 cl = repo.unfiltered().changelog
1405 cl = repo.unfiltered().changelog
1408 phasemv = sorted(tr.changes['phases'].items())
1406 phasemv = sorted(tr.changes['phases'].items())
1409 for rev, (old, new) in phasemv:
1407 for rev, (old, new) in phasemv:
1410 args = tr.hookargs.copy()
1408 args = tr.hookargs.copy()
1411 node = hex(cl.node(rev))
1409 node = hex(cl.node(rev))
1412 args.update(phases.preparehookargs(node, old, new))
1410 args.update(phases.preparehookargs(node, old, new))
1413 repo.hook('txnclose-phase', throw=False, txnname=desc,
1411 repo.hook('txnclose-phase', throw=False, txnname=desc,
1414 **pycompat.strkwargs(args))
1412 **pycompat.strkwargs(args))
1415
1413
1416 repo.hook('txnclose', throw=False, txnname=desc,
1414 repo.hook('txnclose', throw=False, txnname=desc,
1417 **pycompat.strkwargs(hookargs))
1415 **pycompat.strkwargs(hookargs))
1418 reporef()._afterlock(hookfunc)
1416 reporef()._afterlock(hookfunc)
1419 tr.addfinalize('txnclose-hook', txnclosehook)
1417 tr.addfinalize('txnclose-hook', txnclosehook)
1420 # Include a leading "-" to make it happen before the transaction summary
1418 # Include a leading "-" to make it happen before the transaction summary
1421 # reports registered via scmutil.registersummarycallback() whose names
1419 # reports registered via scmutil.registersummarycallback() whose names
1422 # are 00-txnreport etc. That way, the caches will be warm when the
1420 # are 00-txnreport etc. That way, the caches will be warm when the
1423 # callbacks run.
1421 # callbacks run.
1424 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1422 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1425 def txnaborthook(tr2):
1423 def txnaborthook(tr2):
1426 """To be run if transaction is aborted
1424 """To be run if transaction is aborted
1427 """
1425 """
1428 reporef().hook('txnabort', throw=False, txnname=desc,
1426 reporef().hook('txnabort', throw=False, txnname=desc,
1429 **pycompat.strkwargs(tr2.hookargs))
1427 **pycompat.strkwargs(tr2.hookargs))
1430 tr.addabort('txnabort-hook', txnaborthook)
1428 tr.addabort('txnabort-hook', txnaborthook)
1431 # avoid eager cache invalidation. in-memory data should be identical
1429 # avoid eager cache invalidation. in-memory data should be identical
1432 # to stored data if transaction has no error.
1430 # to stored data if transaction has no error.
1433 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1431 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1434 self._transref = weakref.ref(tr)
1432 self._transref = weakref.ref(tr)
1435 scmutil.registersummarycallback(self, tr, desc)
1433 scmutil.registersummarycallback(self, tr, desc)
1436 return tr
1434 return tr
1437
1435
1438 def _journalfiles(self):
1436 def _journalfiles(self):
1439 return ((self.svfs, 'journal'),
1437 return ((self.svfs, 'journal'),
1440 (self.vfs, 'journal.dirstate'),
1438 (self.vfs, 'journal.dirstate'),
1441 (self.vfs, 'journal.branch'),
1439 (self.vfs, 'journal.branch'),
1442 (self.vfs, 'journal.desc'),
1440 (self.vfs, 'journal.desc'),
1443 (self.vfs, 'journal.bookmarks'),
1441 (self.vfs, 'journal.bookmarks'),
1444 (self.svfs, 'journal.phaseroots'))
1442 (self.svfs, 'journal.phaseroots'))
1445
1443
1446 def undofiles(self):
1444 def undofiles(self):
1447 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1445 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1448
1446
1449 @unfilteredmethod
1447 @unfilteredmethod
1450 def _writejournal(self, desc):
1448 def _writejournal(self, desc):
1451 self.dirstate.savebackup(None, 'journal.dirstate')
1449 self.dirstate.savebackup(None, 'journal.dirstate')
1452 self.vfs.write("journal.branch",
1450 self.vfs.write("journal.branch",
1453 encoding.fromlocal(self.dirstate.branch()))
1451 encoding.fromlocal(self.dirstate.branch()))
1454 self.vfs.write("journal.desc",
1452 self.vfs.write("journal.desc",
1455 "%d\n%s\n" % (len(self), desc))
1453 "%d\n%s\n" % (len(self), desc))
1456 self.vfs.write("journal.bookmarks",
1454 self.vfs.write("journal.bookmarks",
1457 self.vfs.tryread("bookmarks"))
1455 self.vfs.tryread("bookmarks"))
1458 self.svfs.write("journal.phaseroots",
1456 self.svfs.write("journal.phaseroots",
1459 self.svfs.tryread("phaseroots"))
1457 self.svfs.tryread("phaseroots"))
1460
1458
1461 def recover(self):
1459 def recover(self):
1462 with self.lock():
1460 with self.lock():
1463 if self.svfs.exists("journal"):
1461 if self.svfs.exists("journal"):
1464 self.ui.status(_("rolling back interrupted transaction\n"))
1462 self.ui.status(_("rolling back interrupted transaction\n"))
1465 vfsmap = {'': self.svfs,
1463 vfsmap = {'': self.svfs,
1466 'plain': self.vfs,}
1464 'plain': self.vfs,}
1467 transaction.rollback(self.svfs, vfsmap, "journal",
1465 transaction.rollback(self.svfs, vfsmap, "journal",
1468 self.ui.warn,
1466 self.ui.warn,
1469 checkambigfiles=_cachedfiles)
1467 checkambigfiles=_cachedfiles)
1470 self.invalidate()
1468 self.invalidate()
1471 return True
1469 return True
1472 else:
1470 else:
1473 self.ui.warn(_("no interrupted transaction available\n"))
1471 self.ui.warn(_("no interrupted transaction available\n"))
1474 return False
1472 return False
1475
1473
1476 def rollback(self, dryrun=False, force=False):
1474 def rollback(self, dryrun=False, force=False):
1477 wlock = lock = dsguard = None
1475 wlock = lock = dsguard = None
1478 try:
1476 try:
1479 wlock = self.wlock()
1477 wlock = self.wlock()
1480 lock = self.lock()
1478 lock = self.lock()
1481 if self.svfs.exists("undo"):
1479 if self.svfs.exists("undo"):
1482 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1480 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1483
1481
1484 return self._rollback(dryrun, force, dsguard)
1482 return self._rollback(dryrun, force, dsguard)
1485 else:
1483 else:
1486 self.ui.warn(_("no rollback information available\n"))
1484 self.ui.warn(_("no rollback information available\n"))
1487 return 1
1485 return 1
1488 finally:
1486 finally:
1489 release(dsguard, lock, wlock)
1487 release(dsguard, lock, wlock)
1490
1488
1491 @unfilteredmethod # Until we get smarter cache management
1489 @unfilteredmethod # Until we get smarter cache management
1492 def _rollback(self, dryrun, force, dsguard):
1490 def _rollback(self, dryrun, force, dsguard):
1493 ui = self.ui
1491 ui = self.ui
1494 try:
1492 try:
1495 args = self.vfs.read('undo.desc').splitlines()
1493 args = self.vfs.read('undo.desc').splitlines()
1496 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1494 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1497 if len(args) >= 3:
1495 if len(args) >= 3:
1498 detail = args[2]
1496 detail = args[2]
1499 oldtip = oldlen - 1
1497 oldtip = oldlen - 1
1500
1498
1501 if detail and ui.verbose:
1499 if detail and ui.verbose:
1502 msg = (_('repository tip rolled back to revision %d'
1500 msg = (_('repository tip rolled back to revision %d'
1503 ' (undo %s: %s)\n')
1501 ' (undo %s: %s)\n')
1504 % (oldtip, desc, detail))
1502 % (oldtip, desc, detail))
1505 else:
1503 else:
1506 msg = (_('repository tip rolled back to revision %d'
1504 msg = (_('repository tip rolled back to revision %d'
1507 ' (undo %s)\n')
1505 ' (undo %s)\n')
1508 % (oldtip, desc))
1506 % (oldtip, desc))
1509 except IOError:
1507 except IOError:
1510 msg = _('rolling back unknown transaction\n')
1508 msg = _('rolling back unknown transaction\n')
1511 desc = None
1509 desc = None
1512
1510
1513 if not force and self['.'] != self['tip'] and desc == 'commit':
1511 if not force and self['.'] != self['tip'] and desc == 'commit':
1514 raise error.Abort(
1512 raise error.Abort(
1515 _('rollback of last commit while not checked out '
1513 _('rollback of last commit while not checked out '
1516 'may lose data'), hint=_('use -f to force'))
1514 'may lose data'), hint=_('use -f to force'))
1517
1515
1518 ui.status(msg)
1516 ui.status(msg)
1519 if dryrun:
1517 if dryrun:
1520 return 0
1518 return 0
1521
1519
1522 parents = self.dirstate.parents()
1520 parents = self.dirstate.parents()
1523 self.destroying()
1521 self.destroying()
1524 vfsmap = {'plain': self.vfs, '': self.svfs}
1522 vfsmap = {'plain': self.vfs, '': self.svfs}
1525 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1523 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1526 checkambigfiles=_cachedfiles)
1524 checkambigfiles=_cachedfiles)
1527 if self.vfs.exists('undo.bookmarks'):
1525 if self.vfs.exists('undo.bookmarks'):
1528 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1526 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1529 if self.svfs.exists('undo.phaseroots'):
1527 if self.svfs.exists('undo.phaseroots'):
1530 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1528 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1531 self.invalidate()
1529 self.invalidate()
1532
1530
1533 parentgone = (parents[0] not in self.changelog.nodemap or
1531 parentgone = (parents[0] not in self.changelog.nodemap or
1534 parents[1] not in self.changelog.nodemap)
1532 parents[1] not in self.changelog.nodemap)
1535 if parentgone:
1533 if parentgone:
1536 # prevent dirstateguard from overwriting already restored one
1534 # prevent dirstateguard from overwriting already restored one
1537 dsguard.close()
1535 dsguard.close()
1538
1536
1539 self.dirstate.restorebackup(None, 'undo.dirstate')
1537 self.dirstate.restorebackup(None, 'undo.dirstate')
1540 try:
1538 try:
1541 branch = self.vfs.read('undo.branch')
1539 branch = self.vfs.read('undo.branch')
1542 self.dirstate.setbranch(encoding.tolocal(branch))
1540 self.dirstate.setbranch(encoding.tolocal(branch))
1543 except IOError:
1541 except IOError:
1544 ui.warn(_('named branch could not be reset: '
1542 ui.warn(_('named branch could not be reset: '
1545 'current branch is still \'%s\'\n')
1543 'current branch is still \'%s\'\n')
1546 % self.dirstate.branch())
1544 % self.dirstate.branch())
1547
1545
1548 parents = tuple([p.rev() for p in self[None].parents()])
1546 parents = tuple([p.rev() for p in self[None].parents()])
1549 if len(parents) > 1:
1547 if len(parents) > 1:
1550 ui.status(_('working directory now based on '
1548 ui.status(_('working directory now based on '
1551 'revisions %d and %d\n') % parents)
1549 'revisions %d and %d\n') % parents)
1552 else:
1550 else:
1553 ui.status(_('working directory now based on '
1551 ui.status(_('working directory now based on '
1554 'revision %d\n') % parents)
1552 'revision %d\n') % parents)
1555 mergemod.mergestate.clean(self, self['.'].node())
1553 mergemod.mergestate.clean(self, self['.'].node())
1556
1554
1557 # TODO: if we know which new heads may result from this rollback, pass
1555 # TODO: if we know which new heads may result from this rollback, pass
1558 # them to destroy(), which will prevent the branchhead cache from being
1556 # them to destroy(), which will prevent the branchhead cache from being
1559 # invalidated.
1557 # invalidated.
1560 self.destroyed()
1558 self.destroyed()
1561 return 0
1559 return 0
1562
1560
1563 def _buildcacheupdater(self, newtransaction):
1561 def _buildcacheupdater(self, newtransaction):
1564 """called during transaction to build the callback updating cache
1562 """called during transaction to build the callback updating cache
1565
1563
1566 Lives on the repository to help extension who might want to augment
1564 Lives on the repository to help extension who might want to augment
1567 this logic. For this purpose, the created transaction is passed to the
1565 this logic. For this purpose, the created transaction is passed to the
1568 method.
1566 method.
1569 """
1567 """
1570 # we must avoid cyclic reference between repo and transaction.
1568 # we must avoid cyclic reference between repo and transaction.
1571 reporef = weakref.ref(self)
1569 reporef = weakref.ref(self)
1572 def updater(tr):
1570 def updater(tr):
1573 repo = reporef()
1571 repo = reporef()
1574 repo.updatecaches(tr)
1572 repo.updatecaches(tr)
1575 return updater
1573 return updater
1576
1574
1577 @unfilteredmethod
1575 @unfilteredmethod
1578 def updatecaches(self, tr=None, full=False):
1576 def updatecaches(self, tr=None, full=False):
1579 """warm appropriate caches
1577 """warm appropriate caches
1580
1578
1581 If this function is called after a transaction closed. The transaction
1579 If this function is called after a transaction closed. The transaction
1582 will be available in the 'tr' argument. This can be used to selectively
1580 will be available in the 'tr' argument. This can be used to selectively
1583 update caches relevant to the changes in that transaction.
1581 update caches relevant to the changes in that transaction.
1584
1582
1585 If 'full' is set, make sure all caches the function knows about have
1583 If 'full' is set, make sure all caches the function knows about have
1586 up-to-date data. Even the ones usually loaded more lazily.
1584 up-to-date data. Even the ones usually loaded more lazily.
1587 """
1585 """
1588 if tr is not None and tr.hookargs.get('source') == 'strip':
1586 if tr is not None and tr.hookargs.get('source') == 'strip':
1589 # During strip, many caches are invalid but
1587 # During strip, many caches are invalid but
1590 # later call to `destroyed` will refresh them.
1588 # later call to `destroyed` will refresh them.
1591 return
1589 return
1592
1590
1593 if tr is None or tr.changes['revs']:
1591 if tr is None or tr.changes['revs']:
1594 # updating the unfiltered branchmap should refresh all the others,
1592 # updating the unfiltered branchmap should refresh all the others,
1595 self.ui.debug('updating the branch cache\n')
1593 self.ui.debug('updating the branch cache\n')
1596 branchmap.updatecache(self.filtered('served'))
1594 branchmap.updatecache(self.filtered('served'))
1597
1595
1598 if full:
1596 if full:
1599 rbc = self.revbranchcache()
1597 rbc = self.revbranchcache()
1600 for r in self.changelog:
1598 for r in self.changelog:
1601 rbc.branchinfo(r)
1599 rbc.branchinfo(r)
1602 rbc.write()
1600 rbc.write()
1603
1601
1604 def invalidatecaches(self):
1602 def invalidatecaches(self):
1605
1603
1606 if '_tagscache' in vars(self):
1604 if '_tagscache' in vars(self):
1607 # can't use delattr on proxy
1605 # can't use delattr on proxy
1608 del self.__dict__['_tagscache']
1606 del self.__dict__['_tagscache']
1609
1607
1610 self.unfiltered()._branchcaches.clear()
1608 self.unfiltered()._branchcaches.clear()
1611 self.invalidatevolatilesets()
1609 self.invalidatevolatilesets()
1612 self._sparsesignaturecache.clear()
1610 self._sparsesignaturecache.clear()
1613
1611
1614 def invalidatevolatilesets(self):
1612 def invalidatevolatilesets(self):
1615 self.filteredrevcache.clear()
1613 self.filteredrevcache.clear()
1616 obsolete.clearobscaches(self)
1614 obsolete.clearobscaches(self)
1617
1615
1618 def invalidatedirstate(self):
1616 def invalidatedirstate(self):
1619 '''Invalidates the dirstate, causing the next call to dirstate
1617 '''Invalidates the dirstate, causing the next call to dirstate
1620 to check if it was modified since the last time it was read,
1618 to check if it was modified since the last time it was read,
1621 rereading it if it has.
1619 rereading it if it has.
1622
1620
1623 This is different to dirstate.invalidate() that it doesn't always
1621 This is different to dirstate.invalidate() that it doesn't always
1624 rereads the dirstate. Use dirstate.invalidate() if you want to
1622 rereads the dirstate. Use dirstate.invalidate() if you want to
1625 explicitly read the dirstate again (i.e. restoring it to a previous
1623 explicitly read the dirstate again (i.e. restoring it to a previous
1626 known good state).'''
1624 known good state).'''
1627 if hasunfilteredcache(self, 'dirstate'):
1625 if hasunfilteredcache(self, 'dirstate'):
1628 for k in self.dirstate._filecache:
1626 for k in self.dirstate._filecache:
1629 try:
1627 try:
1630 delattr(self.dirstate, k)
1628 delattr(self.dirstate, k)
1631 except AttributeError:
1629 except AttributeError:
1632 pass
1630 pass
1633 delattr(self.unfiltered(), 'dirstate')
1631 delattr(self.unfiltered(), 'dirstate')
1634
1632
1635 def invalidate(self, clearfilecache=False):
1633 def invalidate(self, clearfilecache=False):
1636 '''Invalidates both store and non-store parts other than dirstate
1634 '''Invalidates both store and non-store parts other than dirstate
1637
1635
1638 If a transaction is running, invalidation of store is omitted,
1636 If a transaction is running, invalidation of store is omitted,
1639 because discarding in-memory changes might cause inconsistency
1637 because discarding in-memory changes might cause inconsistency
1640 (e.g. incomplete fncache causes unintentional failure, but
1638 (e.g. incomplete fncache causes unintentional failure, but
1641 redundant one doesn't).
1639 redundant one doesn't).
1642 '''
1640 '''
1643 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1641 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1644 for k in list(self._filecache.keys()):
1642 for k in list(self._filecache.keys()):
1645 # dirstate is invalidated separately in invalidatedirstate()
1643 # dirstate is invalidated separately in invalidatedirstate()
1646 if k == 'dirstate':
1644 if k == 'dirstate':
1647 continue
1645 continue
1648 if (k == 'changelog' and
1646 if (k == 'changelog' and
1649 self.currenttransaction() and
1647 self.currenttransaction() and
1650 self.changelog._delayed):
1648 self.changelog._delayed):
1651 # The changelog object may store unwritten revisions. We don't
1649 # The changelog object may store unwritten revisions. We don't
1652 # want to lose them.
1650 # want to lose them.
1653 # TODO: Solve the problem instead of working around it.
1651 # TODO: Solve the problem instead of working around it.
1654 continue
1652 continue
1655
1653
1656 if clearfilecache:
1654 if clearfilecache:
1657 del self._filecache[k]
1655 del self._filecache[k]
1658 try:
1656 try:
1659 delattr(unfiltered, k)
1657 delattr(unfiltered, k)
1660 except AttributeError:
1658 except AttributeError:
1661 pass
1659 pass
1662 self.invalidatecaches()
1660 self.invalidatecaches()
1663 if not self.currenttransaction():
1661 if not self.currenttransaction():
1664 # TODO: Changing contents of store outside transaction
1662 # TODO: Changing contents of store outside transaction
1665 # causes inconsistency. We should make in-memory store
1663 # causes inconsistency. We should make in-memory store
1666 # changes detectable, and abort if changed.
1664 # changes detectable, and abort if changed.
1667 self.store.invalidatecaches()
1665 self.store.invalidatecaches()
1668
1666
1669 def invalidateall(self):
1667 def invalidateall(self):
1670 '''Fully invalidates both store and non-store parts, causing the
1668 '''Fully invalidates both store and non-store parts, causing the
1671 subsequent operation to reread any outside changes.'''
1669 subsequent operation to reread any outside changes.'''
1672 # extension should hook this to invalidate its caches
1670 # extension should hook this to invalidate its caches
1673 self.invalidate()
1671 self.invalidate()
1674 self.invalidatedirstate()
1672 self.invalidatedirstate()
1675
1673
1676 @unfilteredmethod
1674 @unfilteredmethod
1677 def _refreshfilecachestats(self, tr):
1675 def _refreshfilecachestats(self, tr):
1678 """Reload stats of cached files so that they are flagged as valid"""
1676 """Reload stats of cached files so that they are flagged as valid"""
1679 for k, ce in self._filecache.items():
1677 for k, ce in self._filecache.items():
1680 k = pycompat.sysstr(k)
1678 k = pycompat.sysstr(k)
1681 if k == r'dirstate' or k not in self.__dict__:
1679 if k == r'dirstate' or k not in self.__dict__:
1682 continue
1680 continue
1683 ce.refresh()
1681 ce.refresh()
1684
1682
1685 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1683 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1686 inheritchecker=None, parentenvvar=None):
1684 inheritchecker=None, parentenvvar=None):
1687 parentlock = None
1685 parentlock = None
1688 # the contents of parentenvvar are used by the underlying lock to
1686 # the contents of parentenvvar are used by the underlying lock to
1689 # determine whether it can be inherited
1687 # determine whether it can be inherited
1690 if parentenvvar is not None:
1688 if parentenvvar is not None:
1691 parentlock = encoding.environ.get(parentenvvar)
1689 parentlock = encoding.environ.get(parentenvvar)
1692
1690
1693 timeout = 0
1691 timeout = 0
1694 warntimeout = 0
1692 warntimeout = 0
1695 if wait:
1693 if wait:
1696 timeout = self.ui.configint("ui", "timeout")
1694 timeout = self.ui.configint("ui", "timeout")
1697 warntimeout = self.ui.configint("ui", "timeout.warn")
1695 warntimeout = self.ui.configint("ui", "timeout.warn")
1698
1696
1699 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1697 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1700 releasefn=releasefn,
1698 releasefn=releasefn,
1701 acquirefn=acquirefn, desc=desc,
1699 acquirefn=acquirefn, desc=desc,
1702 inheritchecker=inheritchecker,
1700 inheritchecker=inheritchecker,
1703 parentlock=parentlock)
1701 parentlock=parentlock)
1704 return l
1702 return l
1705
1703
1706 def _afterlock(self, callback):
1704 def _afterlock(self, callback):
1707 """add a callback to be run when the repository is fully unlocked
1705 """add a callback to be run when the repository is fully unlocked
1708
1706
1709 The callback will be executed when the outermost lock is released
1707 The callback will be executed when the outermost lock is released
1710 (with wlock being higher level than 'lock')."""
1708 (with wlock being higher level than 'lock')."""
1711 for ref in (self._wlockref, self._lockref):
1709 for ref in (self._wlockref, self._lockref):
1712 l = ref and ref()
1710 l = ref and ref()
1713 if l and l.held:
1711 if l and l.held:
1714 l.postrelease.append(callback)
1712 l.postrelease.append(callback)
1715 break
1713 break
1716 else: # no lock have been found.
1714 else: # no lock have been found.
1717 callback()
1715 callback()
1718
1716
1719 def lock(self, wait=True):
1717 def lock(self, wait=True):
1720 '''Lock the repository store (.hg/store) and return a weak reference
1718 '''Lock the repository store (.hg/store) and return a weak reference
1721 to the lock. Use this before modifying the store (e.g. committing or
1719 to the lock. Use this before modifying the store (e.g. committing or
1722 stripping). If you are opening a transaction, get a lock as well.)
1720 stripping). If you are opening a transaction, get a lock as well.)
1723
1721
1724 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1722 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1725 'wlock' first to avoid a dead-lock hazard.'''
1723 'wlock' first to avoid a dead-lock hazard.'''
1726 l = self._currentlock(self._lockref)
1724 l = self._currentlock(self._lockref)
1727 if l is not None:
1725 if l is not None:
1728 l.lock()
1726 l.lock()
1729 return l
1727 return l
1730
1728
1731 l = self._lock(self.svfs, "lock", wait, None,
1729 l = self._lock(self.svfs, "lock", wait, None,
1732 self.invalidate, _('repository %s') % self.origroot)
1730 self.invalidate, _('repository %s') % self.origroot)
1733 self._lockref = weakref.ref(l)
1731 self._lockref = weakref.ref(l)
1734 return l
1732 return l
1735
1733
1736 def _wlockchecktransaction(self):
1734 def _wlockchecktransaction(self):
1737 if self.currenttransaction() is not None:
1735 if self.currenttransaction() is not None:
1738 raise error.LockInheritanceContractViolation(
1736 raise error.LockInheritanceContractViolation(
1739 'wlock cannot be inherited in the middle of a transaction')
1737 'wlock cannot be inherited in the middle of a transaction')
1740
1738
1741 def wlock(self, wait=True):
1739 def wlock(self, wait=True):
1742 '''Lock the non-store parts of the repository (everything under
1740 '''Lock the non-store parts of the repository (everything under
1743 .hg except .hg/store) and return a weak reference to the lock.
1741 .hg except .hg/store) and return a weak reference to the lock.
1744
1742
1745 Use this before modifying files in .hg.
1743 Use this before modifying files in .hg.
1746
1744
1747 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1745 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1748 'wlock' first to avoid a dead-lock hazard.'''
1746 'wlock' first to avoid a dead-lock hazard.'''
1749 l = self._wlockref and self._wlockref()
1747 l = self._wlockref and self._wlockref()
1750 if l is not None and l.held:
1748 if l is not None and l.held:
1751 l.lock()
1749 l.lock()
1752 return l
1750 return l
1753
1751
1754 # We do not need to check for non-waiting lock acquisition. Such
1752 # We do not need to check for non-waiting lock acquisition. Such
1755 # acquisition would not cause dead-lock as they would just fail.
1753 # acquisition would not cause dead-lock as they would just fail.
1756 if wait and (self.ui.configbool('devel', 'all-warnings')
1754 if wait and (self.ui.configbool('devel', 'all-warnings')
1757 or self.ui.configbool('devel', 'check-locks')):
1755 or self.ui.configbool('devel', 'check-locks')):
1758 if self._currentlock(self._lockref) is not None:
1756 if self._currentlock(self._lockref) is not None:
1759 self.ui.develwarn('"wlock" acquired after "lock"')
1757 self.ui.develwarn('"wlock" acquired after "lock"')
1760
1758
1761 def unlock():
1759 def unlock():
1762 if self.dirstate.pendingparentchange():
1760 if self.dirstate.pendingparentchange():
1763 self.dirstate.invalidate()
1761 self.dirstate.invalidate()
1764 else:
1762 else:
1765 self.dirstate.write(None)
1763 self.dirstate.write(None)
1766
1764
1767 self._filecache['dirstate'].refresh()
1765 self._filecache['dirstate'].refresh()
1768
1766
1769 l = self._lock(self.vfs, "wlock", wait, unlock,
1767 l = self._lock(self.vfs, "wlock", wait, unlock,
1770 self.invalidatedirstate, _('working directory of %s') %
1768 self.invalidatedirstate, _('working directory of %s') %
1771 self.origroot,
1769 self.origroot,
1772 inheritchecker=self._wlockchecktransaction,
1770 inheritchecker=self._wlockchecktransaction,
1773 parentenvvar='HG_WLOCK_LOCKER')
1771 parentenvvar='HG_WLOCK_LOCKER')
1774 self._wlockref = weakref.ref(l)
1772 self._wlockref = weakref.ref(l)
1775 return l
1773 return l
1776
1774
1777 def _currentlock(self, lockref):
1775 def _currentlock(self, lockref):
1778 """Returns the lock if it's held, or None if it's not."""
1776 """Returns the lock if it's held, or None if it's not."""
1779 if lockref is None:
1777 if lockref is None:
1780 return None
1778 return None
1781 l = lockref()
1779 l = lockref()
1782 if l is None or not l.held:
1780 if l is None or not l.held:
1783 return None
1781 return None
1784 return l
1782 return l
1785
1783
1786 def currentwlock(self):
1784 def currentwlock(self):
1787 """Returns the wlock if it's held, or None if it's not."""
1785 """Returns the wlock if it's held, or None if it's not."""
1788 return self._currentlock(self._wlockref)
1786 return self._currentlock(self._wlockref)
1789
1787
1790 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1788 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1791 """
1789 """
1792 commit an individual file as part of a larger transaction
1790 commit an individual file as part of a larger transaction
1793 """
1791 """
1794
1792
1795 fname = fctx.path()
1793 fname = fctx.path()
1796 fparent1 = manifest1.get(fname, nullid)
1794 fparent1 = manifest1.get(fname, nullid)
1797 fparent2 = manifest2.get(fname, nullid)
1795 fparent2 = manifest2.get(fname, nullid)
1798 if isinstance(fctx, context.filectx):
1796 if isinstance(fctx, context.filectx):
1799 node = fctx.filenode()
1797 node = fctx.filenode()
1800 if node in [fparent1, fparent2]:
1798 if node in [fparent1, fparent2]:
1801 self.ui.debug('reusing %s filelog entry\n' % fname)
1799 self.ui.debug('reusing %s filelog entry\n' % fname)
1802 if manifest1.flags(fname) != fctx.flags():
1800 if manifest1.flags(fname) != fctx.flags():
1803 changelist.append(fname)
1801 changelist.append(fname)
1804 return node
1802 return node
1805
1803
1806 flog = self.file(fname)
1804 flog = self.file(fname)
1807 meta = {}
1805 meta = {}
1808 copy = fctx.renamed()
1806 copy = fctx.renamed()
1809 if copy and copy[0] != fname:
1807 if copy and copy[0] != fname:
1810 # Mark the new revision of this file as a copy of another
1808 # Mark the new revision of this file as a copy of another
1811 # file. This copy data will effectively act as a parent
1809 # file. This copy data will effectively act as a parent
1812 # of this new revision. If this is a merge, the first
1810 # of this new revision. If this is a merge, the first
1813 # parent will be the nullid (meaning "look up the copy data")
1811 # parent will be the nullid (meaning "look up the copy data")
1814 # and the second one will be the other parent. For example:
1812 # and the second one will be the other parent. For example:
1815 #
1813 #
1816 # 0 --- 1 --- 3 rev1 changes file foo
1814 # 0 --- 1 --- 3 rev1 changes file foo
1817 # \ / rev2 renames foo to bar and changes it
1815 # \ / rev2 renames foo to bar and changes it
1818 # \- 2 -/ rev3 should have bar with all changes and
1816 # \- 2 -/ rev3 should have bar with all changes and
1819 # should record that bar descends from
1817 # should record that bar descends from
1820 # bar in rev2 and foo in rev1
1818 # bar in rev2 and foo in rev1
1821 #
1819 #
1822 # this allows this merge to succeed:
1820 # this allows this merge to succeed:
1823 #
1821 #
1824 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1822 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1825 # \ / merging rev3 and rev4 should use bar@rev2
1823 # \ / merging rev3 and rev4 should use bar@rev2
1826 # \- 2 --- 4 as the merge base
1824 # \- 2 --- 4 as the merge base
1827 #
1825 #
1828
1826
1829 cfname = copy[0]
1827 cfname = copy[0]
1830 crev = manifest1.get(cfname)
1828 crev = manifest1.get(cfname)
1831 newfparent = fparent2
1829 newfparent = fparent2
1832
1830
1833 if manifest2: # branch merge
1831 if manifest2: # branch merge
1834 if fparent2 == nullid or crev is None: # copied on remote side
1832 if fparent2 == nullid or crev is None: # copied on remote side
1835 if cfname in manifest2:
1833 if cfname in manifest2:
1836 crev = manifest2[cfname]
1834 crev = manifest2[cfname]
1837 newfparent = fparent1
1835 newfparent = fparent1
1838
1836
1839 # Here, we used to search backwards through history to try to find
1837 # Here, we used to search backwards through history to try to find
1840 # where the file copy came from if the source of a copy was not in
1838 # where the file copy came from if the source of a copy was not in
1841 # the parent directory. However, this doesn't actually make sense to
1839 # the parent directory. However, this doesn't actually make sense to
1842 # do (what does a copy from something not in your working copy even
1840 # do (what does a copy from something not in your working copy even
1843 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1841 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1844 # the user that copy information was dropped, so if they didn't
1842 # the user that copy information was dropped, so if they didn't
1845 # expect this outcome it can be fixed, but this is the correct
1843 # expect this outcome it can be fixed, but this is the correct
1846 # behavior in this circumstance.
1844 # behavior in this circumstance.
1847
1845
1848 if crev:
1846 if crev:
1849 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1847 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1850 meta["copy"] = cfname
1848 meta["copy"] = cfname
1851 meta["copyrev"] = hex(crev)
1849 meta["copyrev"] = hex(crev)
1852 fparent1, fparent2 = nullid, newfparent
1850 fparent1, fparent2 = nullid, newfparent
1853 else:
1851 else:
1854 self.ui.warn(_("warning: can't find ancestor for '%s' "
1852 self.ui.warn(_("warning: can't find ancestor for '%s' "
1855 "copied from '%s'!\n") % (fname, cfname))
1853 "copied from '%s'!\n") % (fname, cfname))
1856
1854
1857 elif fparent1 == nullid:
1855 elif fparent1 == nullid:
1858 fparent1, fparent2 = fparent2, nullid
1856 fparent1, fparent2 = fparent2, nullid
1859 elif fparent2 != nullid:
1857 elif fparent2 != nullid:
1860 # is one parent an ancestor of the other?
1858 # is one parent an ancestor of the other?
1861 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1859 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1862 if fparent1 in fparentancestors:
1860 if fparent1 in fparentancestors:
1863 fparent1, fparent2 = fparent2, nullid
1861 fparent1, fparent2 = fparent2, nullid
1864 elif fparent2 in fparentancestors:
1862 elif fparent2 in fparentancestors:
1865 fparent2 = nullid
1863 fparent2 = nullid
1866
1864
1867 # is the file changed?
1865 # is the file changed?
1868 text = fctx.data()
1866 text = fctx.data()
1869 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1867 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1870 changelist.append(fname)
1868 changelist.append(fname)
1871 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1869 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1872 # are just the flags changed during merge?
1870 # are just the flags changed during merge?
1873 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1871 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1874 changelist.append(fname)
1872 changelist.append(fname)
1875
1873
1876 return fparent1
1874 return fparent1
1877
1875
1878 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1876 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1879 """check for commit arguments that aren't committable"""
1877 """check for commit arguments that aren't committable"""
1880 if match.isexact() or match.prefix():
1878 if match.isexact() or match.prefix():
1881 matched = set(status.modified + status.added + status.removed)
1879 matched = set(status.modified + status.added + status.removed)
1882
1880
1883 for f in match.files():
1881 for f in match.files():
1884 f = self.dirstate.normalize(f)
1882 f = self.dirstate.normalize(f)
1885 if f == '.' or f in matched or f in wctx.substate:
1883 if f == '.' or f in matched or f in wctx.substate:
1886 continue
1884 continue
1887 if f in status.deleted:
1885 if f in status.deleted:
1888 fail(f, _('file not found!'))
1886 fail(f, _('file not found!'))
1889 if f in vdirs: # visited directory
1887 if f in vdirs: # visited directory
1890 d = f + '/'
1888 d = f + '/'
1891 for mf in matched:
1889 for mf in matched:
1892 if mf.startswith(d):
1890 if mf.startswith(d):
1893 break
1891 break
1894 else:
1892 else:
1895 fail(f, _("no match under directory!"))
1893 fail(f, _("no match under directory!"))
1896 elif f not in self.dirstate:
1894 elif f not in self.dirstate:
1897 fail(f, _("file not tracked!"))
1895 fail(f, _("file not tracked!"))
1898
1896
1899 @unfilteredmethod
1897 @unfilteredmethod
1900 def commit(self, text="", user=None, date=None, match=None, force=False,
1898 def commit(self, text="", user=None, date=None, match=None, force=False,
1901 editor=False, extra=None):
1899 editor=False, extra=None):
1902 """Add a new revision to current repository.
1900 """Add a new revision to current repository.
1903
1901
1904 Revision information is gathered from the working directory,
1902 Revision information is gathered from the working directory,
1905 match can be used to filter the committed files. If editor is
1903 match can be used to filter the committed files. If editor is
1906 supplied, it is called to get a commit message.
1904 supplied, it is called to get a commit message.
1907 """
1905 """
1908 if extra is None:
1906 if extra is None:
1909 extra = {}
1907 extra = {}
1910
1908
1911 def fail(f, msg):
1909 def fail(f, msg):
1912 raise error.Abort('%s: %s' % (f, msg))
1910 raise error.Abort('%s: %s' % (f, msg))
1913
1911
1914 if not match:
1912 if not match:
1915 match = matchmod.always(self.root, '')
1913 match = matchmod.always(self.root, '')
1916
1914
1917 if not force:
1915 if not force:
1918 vdirs = []
1916 vdirs = []
1919 match.explicitdir = vdirs.append
1917 match.explicitdir = vdirs.append
1920 match.bad = fail
1918 match.bad = fail
1921
1919
1922 wlock = lock = tr = None
1920 wlock = lock = tr = None
1923 try:
1921 try:
1924 wlock = self.wlock()
1922 wlock = self.wlock()
1925 lock = self.lock() # for recent changelog (see issue4368)
1923 lock = self.lock() # for recent changelog (see issue4368)
1926
1924
1927 wctx = self[None]
1925 wctx = self[None]
1928 merge = len(wctx.parents()) > 1
1926 merge = len(wctx.parents()) > 1
1929
1927
1930 if not force and merge and not match.always():
1928 if not force and merge and not match.always():
1931 raise error.Abort(_('cannot partially commit a merge '
1929 raise error.Abort(_('cannot partially commit a merge '
1932 '(do not specify files or patterns)'))
1930 '(do not specify files or patterns)'))
1933
1931
1934 status = self.status(match=match, clean=force)
1932 status = self.status(match=match, clean=force)
1935 if force:
1933 if force:
1936 status.modified.extend(status.clean) # mq may commit clean files
1934 status.modified.extend(status.clean) # mq may commit clean files
1937
1935
1938 # check subrepos
1936 # check subrepos
1939 subs, commitsubs, newstate = subrepoutil.precommit(
1937 subs, commitsubs, newstate = subrepoutil.precommit(
1940 self.ui, wctx, status, match, force=force)
1938 self.ui, wctx, status, match, force=force)
1941
1939
1942 # make sure all explicit patterns are matched
1940 # make sure all explicit patterns are matched
1943 if not force:
1941 if not force:
1944 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1942 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1945
1943
1946 cctx = context.workingcommitctx(self, status,
1944 cctx = context.workingcommitctx(self, status,
1947 text, user, date, extra)
1945 text, user, date, extra)
1948
1946
1949 # internal config: ui.allowemptycommit
1947 # internal config: ui.allowemptycommit
1950 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1948 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1951 or extra.get('close') or merge or cctx.files()
1949 or extra.get('close') or merge or cctx.files()
1952 or self.ui.configbool('ui', 'allowemptycommit'))
1950 or self.ui.configbool('ui', 'allowemptycommit'))
1953 if not allowemptycommit:
1951 if not allowemptycommit:
1954 return None
1952 return None
1955
1953
1956 if merge and cctx.deleted():
1954 if merge and cctx.deleted():
1957 raise error.Abort(_("cannot commit merge with missing files"))
1955 raise error.Abort(_("cannot commit merge with missing files"))
1958
1956
1959 ms = mergemod.mergestate.read(self)
1957 ms = mergemod.mergestate.read(self)
1960 mergeutil.checkunresolved(ms)
1958 mergeutil.checkunresolved(ms)
1961
1959
1962 if editor:
1960 if editor:
1963 cctx._text = editor(self, cctx, subs)
1961 cctx._text = editor(self, cctx, subs)
1964 edited = (text != cctx._text)
1962 edited = (text != cctx._text)
1965
1963
1966 # Save commit message in case this transaction gets rolled back
1964 # Save commit message in case this transaction gets rolled back
1967 # (e.g. by a pretxncommit hook). Leave the content alone on
1965 # (e.g. by a pretxncommit hook). Leave the content alone on
1968 # the assumption that the user will use the same editor again.
1966 # the assumption that the user will use the same editor again.
1969 msgfn = self.savecommitmessage(cctx._text)
1967 msgfn = self.savecommitmessage(cctx._text)
1970
1968
1971 # commit subs and write new state
1969 # commit subs and write new state
1972 if subs:
1970 if subs:
1973 for s in sorted(commitsubs):
1971 for s in sorted(commitsubs):
1974 sub = wctx.sub(s)
1972 sub = wctx.sub(s)
1975 self.ui.status(_('committing subrepository %s\n') %
1973 self.ui.status(_('committing subrepository %s\n') %
1976 subrepoutil.subrelpath(sub))
1974 subrepoutil.subrelpath(sub))
1977 sr = sub.commit(cctx._text, user, date)
1975 sr = sub.commit(cctx._text, user, date)
1978 newstate[s] = (newstate[s][0], sr)
1976 newstate[s] = (newstate[s][0], sr)
1979 subrepoutil.writestate(self, newstate)
1977 subrepoutil.writestate(self, newstate)
1980
1978
1981 p1, p2 = self.dirstate.parents()
1979 p1, p2 = self.dirstate.parents()
1982 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1980 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1983 try:
1981 try:
1984 self.hook("precommit", throw=True, parent1=hookp1,
1982 self.hook("precommit", throw=True, parent1=hookp1,
1985 parent2=hookp2)
1983 parent2=hookp2)
1986 tr = self.transaction('commit')
1984 tr = self.transaction('commit')
1987 ret = self.commitctx(cctx, True)
1985 ret = self.commitctx(cctx, True)
1988 except: # re-raises
1986 except: # re-raises
1989 if edited:
1987 if edited:
1990 self.ui.write(
1988 self.ui.write(
1991 _('note: commit message saved in %s\n') % msgfn)
1989 _('note: commit message saved in %s\n') % msgfn)
1992 raise
1990 raise
1993 # update bookmarks, dirstate and mergestate
1991 # update bookmarks, dirstate and mergestate
1994 bookmarks.update(self, [p1, p2], ret)
1992 bookmarks.update(self, [p1, p2], ret)
1995 cctx.markcommitted(ret)
1993 cctx.markcommitted(ret)
1996 ms.reset()
1994 ms.reset()
1997 tr.close()
1995 tr.close()
1998
1996
1999 finally:
1997 finally:
2000 lockmod.release(tr, lock, wlock)
1998 lockmod.release(tr, lock, wlock)
2001
1999
2002 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2000 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2003 # hack for command that use a temporary commit (eg: histedit)
2001 # hack for command that use a temporary commit (eg: histedit)
2004 # temporary commit got stripped before hook release
2002 # temporary commit got stripped before hook release
2005 if self.changelog.hasnode(ret):
2003 if self.changelog.hasnode(ret):
2006 self.hook("commit", node=node, parent1=parent1,
2004 self.hook("commit", node=node, parent1=parent1,
2007 parent2=parent2)
2005 parent2=parent2)
2008 self._afterlock(commithook)
2006 self._afterlock(commithook)
2009 return ret
2007 return ret
2010
2008
2011 @unfilteredmethod
2009 @unfilteredmethod
2012 def commitctx(self, ctx, error=False):
2010 def commitctx(self, ctx, error=False):
2013 """Add a new revision to current repository.
2011 """Add a new revision to current repository.
2014 Revision information is passed via the context argument.
2012 Revision information is passed via the context argument.
2015 """
2013 """
2016
2014
2017 tr = None
2015 tr = None
2018 p1, p2 = ctx.p1(), ctx.p2()
2016 p1, p2 = ctx.p1(), ctx.p2()
2019 user = ctx.user()
2017 user = ctx.user()
2020
2018
2021 lock = self.lock()
2019 lock = self.lock()
2022 try:
2020 try:
2023 tr = self.transaction("commit")
2021 tr = self.transaction("commit")
2024 trp = weakref.proxy(tr)
2022 trp = weakref.proxy(tr)
2025
2023
2026 if ctx.manifestnode():
2024 if ctx.manifestnode():
2027 # reuse an existing manifest revision
2025 # reuse an existing manifest revision
2028 mn = ctx.manifestnode()
2026 mn = ctx.manifestnode()
2029 files = ctx.files()
2027 files = ctx.files()
2030 elif ctx.files():
2028 elif ctx.files():
2031 m1ctx = p1.manifestctx()
2029 m1ctx = p1.manifestctx()
2032 m2ctx = p2.manifestctx()
2030 m2ctx = p2.manifestctx()
2033 mctx = m1ctx.copy()
2031 mctx = m1ctx.copy()
2034
2032
2035 m = mctx.read()
2033 m = mctx.read()
2036 m1 = m1ctx.read()
2034 m1 = m1ctx.read()
2037 m2 = m2ctx.read()
2035 m2 = m2ctx.read()
2038
2036
2039 # check in files
2037 # check in files
2040 added = []
2038 added = []
2041 changed = []
2039 changed = []
2042 removed = list(ctx.removed())
2040 removed = list(ctx.removed())
2043 linkrev = len(self)
2041 linkrev = len(self)
2044 self.ui.note(_("committing files:\n"))
2042 self.ui.note(_("committing files:\n"))
2045 for f in sorted(ctx.modified() + ctx.added()):
2043 for f in sorted(ctx.modified() + ctx.added()):
2046 self.ui.note(f + "\n")
2044 self.ui.note(f + "\n")
2047 try:
2045 try:
2048 fctx = ctx[f]
2046 fctx = ctx[f]
2049 if fctx is None:
2047 if fctx is None:
2050 removed.append(f)
2048 removed.append(f)
2051 else:
2049 else:
2052 added.append(f)
2050 added.append(f)
2053 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2051 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2054 trp, changed)
2052 trp, changed)
2055 m.setflag(f, fctx.flags())
2053 m.setflag(f, fctx.flags())
2056 except OSError as inst:
2054 except OSError as inst:
2057 self.ui.warn(_("trouble committing %s!\n") % f)
2055 self.ui.warn(_("trouble committing %s!\n") % f)
2058 raise
2056 raise
2059 except IOError as inst:
2057 except IOError as inst:
2060 errcode = getattr(inst, 'errno', errno.ENOENT)
2058 errcode = getattr(inst, 'errno', errno.ENOENT)
2061 if error or errcode and errcode != errno.ENOENT:
2059 if error or errcode and errcode != errno.ENOENT:
2062 self.ui.warn(_("trouble committing %s!\n") % f)
2060 self.ui.warn(_("trouble committing %s!\n") % f)
2063 raise
2061 raise
2064
2062
2065 # update manifest
2063 # update manifest
2066 self.ui.note(_("committing manifest\n"))
2064 self.ui.note(_("committing manifest\n"))
2067 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2065 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2068 drop = [f for f in removed if f in m]
2066 drop = [f for f in removed if f in m]
2069 for f in drop:
2067 for f in drop:
2070 del m[f]
2068 del m[f]
2071 mn = mctx.write(trp, linkrev,
2069 mn = mctx.write(trp, linkrev,
2072 p1.manifestnode(), p2.manifestnode(),
2070 p1.manifestnode(), p2.manifestnode(),
2073 added, drop)
2071 added, drop)
2074 files = changed + removed
2072 files = changed + removed
2075 else:
2073 else:
2076 mn = p1.manifestnode()
2074 mn = p1.manifestnode()
2077 files = []
2075 files = []
2078
2076
2079 # update changelog
2077 # update changelog
2080 self.ui.note(_("committing changelog\n"))
2078 self.ui.note(_("committing changelog\n"))
2081 self.changelog.delayupdate(tr)
2079 self.changelog.delayupdate(tr)
2082 n = self.changelog.add(mn, files, ctx.description(),
2080 n = self.changelog.add(mn, files, ctx.description(),
2083 trp, p1.node(), p2.node(),
2081 trp, p1.node(), p2.node(),
2084 user, ctx.date(), ctx.extra().copy())
2082 user, ctx.date(), ctx.extra().copy())
2085 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2083 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2086 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2084 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2087 parent2=xp2)
2085 parent2=xp2)
2088 # set the new commit is proper phase
2086 # set the new commit is proper phase
2089 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2087 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2090 if targetphase:
2088 if targetphase:
2091 # retract boundary do not alter parent changeset.
2089 # retract boundary do not alter parent changeset.
2092 # if a parent have higher the resulting phase will
2090 # if a parent have higher the resulting phase will
2093 # be compliant anyway
2091 # be compliant anyway
2094 #
2092 #
2095 # if minimal phase was 0 we don't need to retract anything
2093 # if minimal phase was 0 we don't need to retract anything
2096 phases.registernew(self, tr, targetphase, [n])
2094 phases.registernew(self, tr, targetphase, [n])
2097 tr.close()
2095 tr.close()
2098 return n
2096 return n
2099 finally:
2097 finally:
2100 if tr:
2098 if tr:
2101 tr.release()
2099 tr.release()
2102 lock.release()
2100 lock.release()
2103
2101
2104 @unfilteredmethod
2102 @unfilteredmethod
2105 def destroying(self):
2103 def destroying(self):
2106 '''Inform the repository that nodes are about to be destroyed.
2104 '''Inform the repository that nodes are about to be destroyed.
2107 Intended for use by strip and rollback, so there's a common
2105 Intended for use by strip and rollback, so there's a common
2108 place for anything that has to be done before destroying history.
2106 place for anything that has to be done before destroying history.
2109
2107
2110 This is mostly useful for saving state that is in memory and waiting
2108 This is mostly useful for saving state that is in memory and waiting
2111 to be flushed when the current lock is released. Because a call to
2109 to be flushed when the current lock is released. Because a call to
2112 destroyed is imminent, the repo will be invalidated causing those
2110 destroyed is imminent, the repo will be invalidated causing those
2113 changes to stay in memory (waiting for the next unlock), or vanish
2111 changes to stay in memory (waiting for the next unlock), or vanish
2114 completely.
2112 completely.
2115 '''
2113 '''
2116 # When using the same lock to commit and strip, the phasecache is left
2114 # When using the same lock to commit and strip, the phasecache is left
2117 # dirty after committing. Then when we strip, the repo is invalidated,
2115 # dirty after committing. Then when we strip, the repo is invalidated,
2118 # causing those changes to disappear.
2116 # causing those changes to disappear.
2119 if '_phasecache' in vars(self):
2117 if '_phasecache' in vars(self):
2120 self._phasecache.write()
2118 self._phasecache.write()
2121
2119
2122 @unfilteredmethod
2120 @unfilteredmethod
2123 def destroyed(self):
2121 def destroyed(self):
2124 '''Inform the repository that nodes have been destroyed.
2122 '''Inform the repository that nodes have been destroyed.
2125 Intended for use by strip and rollback, so there's a common
2123 Intended for use by strip and rollback, so there's a common
2126 place for anything that has to be done after destroying history.
2124 place for anything that has to be done after destroying history.
2127 '''
2125 '''
2128 # When one tries to:
2126 # When one tries to:
2129 # 1) destroy nodes thus calling this method (e.g. strip)
2127 # 1) destroy nodes thus calling this method (e.g. strip)
2130 # 2) use phasecache somewhere (e.g. commit)
2128 # 2) use phasecache somewhere (e.g. commit)
2131 #
2129 #
2132 # then 2) will fail because the phasecache contains nodes that were
2130 # then 2) will fail because the phasecache contains nodes that were
2133 # removed. We can either remove phasecache from the filecache,
2131 # removed. We can either remove phasecache from the filecache,
2134 # causing it to reload next time it is accessed, or simply filter
2132 # causing it to reload next time it is accessed, or simply filter
2135 # the removed nodes now and write the updated cache.
2133 # the removed nodes now and write the updated cache.
2136 self._phasecache.filterunknown(self)
2134 self._phasecache.filterunknown(self)
2137 self._phasecache.write()
2135 self._phasecache.write()
2138
2136
2139 # refresh all repository caches
2137 # refresh all repository caches
2140 self.updatecaches()
2138 self.updatecaches()
2141
2139
2142 # Ensure the persistent tag cache is updated. Doing it now
2140 # Ensure the persistent tag cache is updated. Doing it now
2143 # means that the tag cache only has to worry about destroyed
2141 # means that the tag cache only has to worry about destroyed
2144 # heads immediately after a strip/rollback. That in turn
2142 # heads immediately after a strip/rollback. That in turn
2145 # guarantees that "cachetip == currenttip" (comparing both rev
2143 # guarantees that "cachetip == currenttip" (comparing both rev
2146 # and node) always means no nodes have been added or destroyed.
2144 # and node) always means no nodes have been added or destroyed.
2147
2145
2148 # XXX this is suboptimal when qrefresh'ing: we strip the current
2146 # XXX this is suboptimal when qrefresh'ing: we strip the current
2149 # head, refresh the tag cache, then immediately add a new head.
2147 # head, refresh the tag cache, then immediately add a new head.
2150 # But I think doing it this way is necessary for the "instant
2148 # But I think doing it this way is necessary for the "instant
2151 # tag cache retrieval" case to work.
2149 # tag cache retrieval" case to work.
2152 self.invalidate()
2150 self.invalidate()
2153
2151
2154 def status(self, node1='.', node2=None, match=None,
2152 def status(self, node1='.', node2=None, match=None,
2155 ignored=False, clean=False, unknown=False,
2153 ignored=False, clean=False, unknown=False,
2156 listsubrepos=False):
2154 listsubrepos=False):
2157 '''a convenience method that calls node1.status(node2)'''
2155 '''a convenience method that calls node1.status(node2)'''
2158 return self[node1].status(node2, match, ignored, clean, unknown,
2156 return self[node1].status(node2, match, ignored, clean, unknown,
2159 listsubrepos)
2157 listsubrepos)
2160
2158
2161 def addpostdsstatus(self, ps):
2159 def addpostdsstatus(self, ps):
2162 """Add a callback to run within the wlock, at the point at which status
2160 """Add a callback to run within the wlock, at the point at which status
2163 fixups happen.
2161 fixups happen.
2164
2162
2165 On status completion, callback(wctx, status) will be called with the
2163 On status completion, callback(wctx, status) will be called with the
2166 wlock held, unless the dirstate has changed from underneath or the wlock
2164 wlock held, unless the dirstate has changed from underneath or the wlock
2167 couldn't be grabbed.
2165 couldn't be grabbed.
2168
2166
2169 Callbacks should not capture and use a cached copy of the dirstate --
2167 Callbacks should not capture and use a cached copy of the dirstate --
2170 it might change in the meanwhile. Instead, they should access the
2168 it might change in the meanwhile. Instead, they should access the
2171 dirstate via wctx.repo().dirstate.
2169 dirstate via wctx.repo().dirstate.
2172
2170
2173 This list is emptied out after each status run -- extensions should
2171 This list is emptied out after each status run -- extensions should
2174 make sure it adds to this list each time dirstate.status is called.
2172 make sure it adds to this list each time dirstate.status is called.
2175 Extensions should also make sure they don't call this for statuses
2173 Extensions should also make sure they don't call this for statuses
2176 that don't involve the dirstate.
2174 that don't involve the dirstate.
2177 """
2175 """
2178
2176
2179 # The list is located here for uniqueness reasons -- it is actually
2177 # The list is located here for uniqueness reasons -- it is actually
2180 # managed by the workingctx, but that isn't unique per-repo.
2178 # managed by the workingctx, but that isn't unique per-repo.
2181 self._postdsstatus.append(ps)
2179 self._postdsstatus.append(ps)
2182
2180
2183 def postdsstatus(self):
2181 def postdsstatus(self):
2184 """Used by workingctx to get the list of post-dirstate-status hooks."""
2182 """Used by workingctx to get the list of post-dirstate-status hooks."""
2185 return self._postdsstatus
2183 return self._postdsstatus
2186
2184
2187 def clearpostdsstatus(self):
2185 def clearpostdsstatus(self):
2188 """Used by workingctx to clear post-dirstate-status hooks."""
2186 """Used by workingctx to clear post-dirstate-status hooks."""
2189 del self._postdsstatus[:]
2187 del self._postdsstatus[:]
2190
2188
2191 def heads(self, start=None):
2189 def heads(self, start=None):
2192 if start is None:
2190 if start is None:
2193 cl = self.changelog
2191 cl = self.changelog
2194 headrevs = reversed(cl.headrevs())
2192 headrevs = reversed(cl.headrevs())
2195 return [cl.node(rev) for rev in headrevs]
2193 return [cl.node(rev) for rev in headrevs]
2196
2194
2197 heads = self.changelog.heads(start)
2195 heads = self.changelog.heads(start)
2198 # sort the output in rev descending order
2196 # sort the output in rev descending order
2199 return sorted(heads, key=self.changelog.rev, reverse=True)
2197 return sorted(heads, key=self.changelog.rev, reverse=True)
2200
2198
2201 def branchheads(self, branch=None, start=None, closed=False):
2199 def branchheads(self, branch=None, start=None, closed=False):
2202 '''return a (possibly filtered) list of heads for the given branch
2200 '''return a (possibly filtered) list of heads for the given branch
2203
2201
2204 Heads are returned in topological order, from newest to oldest.
2202 Heads are returned in topological order, from newest to oldest.
2205 If branch is None, use the dirstate branch.
2203 If branch is None, use the dirstate branch.
2206 If start is not None, return only heads reachable from start.
2204 If start is not None, return only heads reachable from start.
2207 If closed is True, return heads that are marked as closed as well.
2205 If closed is True, return heads that are marked as closed as well.
2208 '''
2206 '''
2209 if branch is None:
2207 if branch is None:
2210 branch = self[None].branch()
2208 branch = self[None].branch()
2211 branches = self.branchmap()
2209 branches = self.branchmap()
2212 if branch not in branches:
2210 if branch not in branches:
2213 return []
2211 return []
2214 # the cache returns heads ordered lowest to highest
2212 # the cache returns heads ordered lowest to highest
2215 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2213 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2216 if start is not None:
2214 if start is not None:
2217 # filter out the heads that cannot be reached from startrev
2215 # filter out the heads that cannot be reached from startrev
2218 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2216 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2219 bheads = [h for h in bheads if h in fbheads]
2217 bheads = [h for h in bheads if h in fbheads]
2220 return bheads
2218 return bheads
2221
2219
2222 def branches(self, nodes):
2220 def branches(self, nodes):
2223 if not nodes:
2221 if not nodes:
2224 nodes = [self.changelog.tip()]
2222 nodes = [self.changelog.tip()]
2225 b = []
2223 b = []
2226 for n in nodes:
2224 for n in nodes:
2227 t = n
2225 t = n
2228 while True:
2226 while True:
2229 p = self.changelog.parents(n)
2227 p = self.changelog.parents(n)
2230 if p[1] != nullid or p[0] == nullid:
2228 if p[1] != nullid or p[0] == nullid:
2231 b.append((t, n, p[0], p[1]))
2229 b.append((t, n, p[0], p[1]))
2232 break
2230 break
2233 n = p[0]
2231 n = p[0]
2234 return b
2232 return b
2235
2233
2236 def between(self, pairs):
2234 def between(self, pairs):
2237 r = []
2235 r = []
2238
2236
2239 for top, bottom in pairs:
2237 for top, bottom in pairs:
2240 n, l, i = top, [], 0
2238 n, l, i = top, [], 0
2241 f = 1
2239 f = 1
2242
2240
2243 while n != bottom and n != nullid:
2241 while n != bottom and n != nullid:
2244 p = self.changelog.parents(n)[0]
2242 p = self.changelog.parents(n)[0]
2245 if i == f:
2243 if i == f:
2246 l.append(n)
2244 l.append(n)
2247 f = f * 2
2245 f = f * 2
2248 n = p
2246 n = p
2249 i += 1
2247 i += 1
2250
2248
2251 r.append(l)
2249 r.append(l)
2252
2250
2253 return r
2251 return r
2254
2252
2255 def checkpush(self, pushop):
2253 def checkpush(self, pushop):
2256 """Extensions can override this function if additional checks have
2254 """Extensions can override this function if additional checks have
2257 to be performed before pushing, or call it if they override push
2255 to be performed before pushing, or call it if they override push
2258 command.
2256 command.
2259 """
2257 """
2260
2258
2261 @unfilteredpropertycache
2259 @unfilteredpropertycache
2262 def prepushoutgoinghooks(self):
2260 def prepushoutgoinghooks(self):
2263 """Return util.hooks consists of a pushop with repo, remote, outgoing
2261 """Return util.hooks consists of a pushop with repo, remote, outgoing
2264 methods, which are called before pushing changesets.
2262 methods, which are called before pushing changesets.
2265 """
2263 """
2266 return util.hooks()
2264 return util.hooks()
2267
2265
2268 def pushkey(self, namespace, key, old, new):
2266 def pushkey(self, namespace, key, old, new):
2269 try:
2267 try:
2270 tr = self.currenttransaction()
2268 tr = self.currenttransaction()
2271 hookargs = {}
2269 hookargs = {}
2272 if tr is not None:
2270 if tr is not None:
2273 hookargs.update(tr.hookargs)
2271 hookargs.update(tr.hookargs)
2274 hookargs = pycompat.strkwargs(hookargs)
2272 hookargs = pycompat.strkwargs(hookargs)
2275 hookargs[r'namespace'] = namespace
2273 hookargs[r'namespace'] = namespace
2276 hookargs[r'key'] = key
2274 hookargs[r'key'] = key
2277 hookargs[r'old'] = old
2275 hookargs[r'old'] = old
2278 hookargs[r'new'] = new
2276 hookargs[r'new'] = new
2279 self.hook('prepushkey', throw=True, **hookargs)
2277 self.hook('prepushkey', throw=True, **hookargs)
2280 except error.HookAbort as exc:
2278 except error.HookAbort as exc:
2281 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2279 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2282 if exc.hint:
2280 if exc.hint:
2283 self.ui.write_err(_("(%s)\n") % exc.hint)
2281 self.ui.write_err(_("(%s)\n") % exc.hint)
2284 return False
2282 return False
2285 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2283 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2286 ret = pushkey.push(self, namespace, key, old, new)
2284 ret = pushkey.push(self, namespace, key, old, new)
2287 def runhook():
2285 def runhook():
2288 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2286 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2289 ret=ret)
2287 ret=ret)
2290 self._afterlock(runhook)
2288 self._afterlock(runhook)
2291 return ret
2289 return ret
2292
2290
2293 def listkeys(self, namespace):
2291 def listkeys(self, namespace):
2294 self.hook('prelistkeys', throw=True, namespace=namespace)
2292 self.hook('prelistkeys', throw=True, namespace=namespace)
2295 self.ui.debug('listing keys for "%s"\n' % namespace)
2293 self.ui.debug('listing keys for "%s"\n' % namespace)
2296 values = pushkey.list(self, namespace)
2294 values = pushkey.list(self, namespace)
2297 self.hook('listkeys', namespace=namespace, values=values)
2295 self.hook('listkeys', namespace=namespace, values=values)
2298 return values
2296 return values
2299
2297
2300 def debugwireargs(self, one, two, three=None, four=None, five=None):
2298 def debugwireargs(self, one, two, three=None, four=None, five=None):
2301 '''used to test argument passing over the wire'''
2299 '''used to test argument passing over the wire'''
2302 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2300 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2303 pycompat.bytestr(four),
2301 pycompat.bytestr(four),
2304 pycompat.bytestr(five))
2302 pycompat.bytestr(five))
2305
2303
2306 def savecommitmessage(self, text):
2304 def savecommitmessage(self, text):
2307 fp = self.vfs('last-message.txt', 'wb')
2305 fp = self.vfs('last-message.txt', 'wb')
2308 try:
2306 try:
2309 fp.write(text)
2307 fp.write(text)
2310 finally:
2308 finally:
2311 fp.close()
2309 fp.close()
2312 return self.pathto(fp.name[len(self.root) + 1:])
2310 return self.pathto(fp.name[len(self.root) + 1:])
2313
2311
2314 # used to avoid circular references so destructors work
2312 # used to avoid circular references so destructors work
2315 def aftertrans(files):
2313 def aftertrans(files):
2316 renamefiles = [tuple(t) for t in files]
2314 renamefiles = [tuple(t) for t in files]
2317 def a():
2315 def a():
2318 for vfs, src, dest in renamefiles:
2316 for vfs, src, dest in renamefiles:
2319 # if src and dest refer to a same file, vfs.rename is a no-op,
2317 # if src and dest refer to a same file, vfs.rename is a no-op,
2320 # leaving both src and dest on disk. delete dest to make sure
2318 # leaving both src and dest on disk. delete dest to make sure
2321 # the rename couldn't be such a no-op.
2319 # the rename couldn't be such a no-op.
2322 vfs.tryunlink(dest)
2320 vfs.tryunlink(dest)
2323 try:
2321 try:
2324 vfs.rename(src, dest)
2322 vfs.rename(src, dest)
2325 except OSError: # journal file does not yet exist
2323 except OSError: # journal file does not yet exist
2326 pass
2324 pass
2327 return a
2325 return a
2328
2326
2329 def undoname(fn):
2327 def undoname(fn):
2330 base, name = os.path.split(fn)
2328 base, name = os.path.split(fn)
2331 assert name.startswith('journal')
2329 assert name.startswith('journal')
2332 return os.path.join(base, name.replace('journal', 'undo', 1))
2330 return os.path.join(base, name.replace('journal', 'undo', 1))
2333
2331
2334 def instance(ui, path, create, intents=None):
2332 def instance(ui, path, create, intents=None):
2335 return localrepository(ui, util.urllocalpath(path), create,
2333 return localrepository(ui, util.urllocalpath(path), create,
2336 intents=intents)
2334 intents=intents)
2337
2335
2338 def islocal(path):
2336 def islocal(path):
2339 return True
2337 return True
2340
2338
2341 def newreporequirements(repo):
2339 def newreporequirements(repo):
2342 """Determine the set of requirements for a new local repository.
2340 """Determine the set of requirements for a new local repository.
2343
2341
2344 Extensions can wrap this function to specify custom requirements for
2342 Extensions can wrap this function to specify custom requirements for
2345 new repositories.
2343 new repositories.
2346 """
2344 """
2347 ui = repo.ui
2345 ui = repo.ui
2348 requirements = {'revlogv1'}
2346 requirements = {'revlogv1'}
2349 if ui.configbool('format', 'usestore'):
2347 if ui.configbool('format', 'usestore'):
2350 requirements.add('store')
2348 requirements.add('store')
2351 if ui.configbool('format', 'usefncache'):
2349 if ui.configbool('format', 'usefncache'):
2352 requirements.add('fncache')
2350 requirements.add('fncache')
2353 if ui.configbool('format', 'dotencode'):
2351 if ui.configbool('format', 'dotencode'):
2354 requirements.add('dotencode')
2352 requirements.add('dotencode')
2355
2353
2356 compengine = ui.config('experimental', 'format.compression')
2354 compengine = ui.config('experimental', 'format.compression')
2357 if compengine not in util.compengines:
2355 if compengine not in util.compengines:
2358 raise error.Abort(_('compression engine %s defined by '
2356 raise error.Abort(_('compression engine %s defined by '
2359 'experimental.format.compression not available') %
2357 'experimental.format.compression not available') %
2360 compengine,
2358 compengine,
2361 hint=_('run "hg debuginstall" to list available '
2359 hint=_('run "hg debuginstall" to list available '
2362 'compression engines'))
2360 'compression engines'))
2363
2361
2364 # zlib is the historical default and doesn't need an explicit requirement.
2362 # zlib is the historical default and doesn't need an explicit requirement.
2365 if compengine != 'zlib':
2363 if compengine != 'zlib':
2366 requirements.add('exp-compression-%s' % compengine)
2364 requirements.add('exp-compression-%s' % compengine)
2367
2365
2368 if scmutil.gdinitconfig(ui):
2366 if scmutil.gdinitconfig(ui):
2369 requirements.add('generaldelta')
2367 requirements.add('generaldelta')
2370 if ui.configbool('experimental', 'treemanifest'):
2368 if ui.configbool('experimental', 'treemanifest'):
2371 requirements.add('treemanifest')
2369 requirements.add('treemanifest')
2372
2370
2373 revlogv2 = ui.config('experimental', 'revlogv2')
2371 revlogv2 = ui.config('experimental', 'revlogv2')
2374 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2372 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2375 requirements.remove('revlogv1')
2373 requirements.remove('revlogv1')
2376 # generaldelta is implied by revlogv2.
2374 # generaldelta is implied by revlogv2.
2377 requirements.discard('generaldelta')
2375 requirements.discard('generaldelta')
2378 requirements.add(REVLOGV2_REQUIREMENT)
2376 requirements.add(REVLOGV2_REQUIREMENT)
2379
2377
2380 return requirements
2378 return requirements
@@ -1,994 +1,994 b''
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from .thirdparty.zope import (
12 interface as zi,
13 )
14 from . import (
11 from . import (
15 error,
12 error,
16 )
13 )
14 from .utils import (
15 interfaceutil,
16 )
17
17
18 class ipeerconnection(zi.Interface):
18 class ipeerconnection(interfaceutil.Interface):
19 """Represents a "connection" to a repository.
19 """Represents a "connection" to a repository.
20
20
21 This is the base interface for representing a connection to a repository.
21 This is the base interface for representing a connection to a repository.
22 It holds basic properties and methods applicable to all peer types.
22 It holds basic properties and methods applicable to all peer types.
23
23
24 This is not a complete interface definition and should not be used
24 This is not a complete interface definition and should not be used
25 outside of this module.
25 outside of this module.
26 """
26 """
27 ui = zi.Attribute("""ui.ui instance""")
27 ui = interfaceutil.Attribute("""ui.ui instance""")
28
28
29 def url():
29 def url():
30 """Returns a URL string representing this peer.
30 """Returns a URL string representing this peer.
31
31
32 Currently, implementations expose the raw URL used to construct the
32 Currently, implementations expose the raw URL used to construct the
33 instance. It may contain credentials as part of the URL. The
33 instance. It may contain credentials as part of the URL. The
34 expectations of the value aren't well-defined and this could lead to
34 expectations of the value aren't well-defined and this could lead to
35 data leakage.
35 data leakage.
36
36
37 TODO audit/clean consumers and more clearly define the contents of this
37 TODO audit/clean consumers and more clearly define the contents of this
38 value.
38 value.
39 """
39 """
40
40
41 def local():
41 def local():
42 """Returns a local repository instance.
42 """Returns a local repository instance.
43
43
44 If the peer represents a local repository, returns an object that
44 If the peer represents a local repository, returns an object that
45 can be used to interface with it. Otherwise returns ``None``.
45 can be used to interface with it. Otherwise returns ``None``.
46 """
46 """
47
47
48 def peer():
48 def peer():
49 """Returns an object conforming to this interface.
49 """Returns an object conforming to this interface.
50
50
51 Most implementations will ``return self``.
51 Most implementations will ``return self``.
52 """
52 """
53
53
54 def canpush():
54 def canpush():
55 """Returns a boolean indicating if this peer can be pushed to."""
55 """Returns a boolean indicating if this peer can be pushed to."""
56
56
57 def close():
57 def close():
58 """Close the connection to this peer.
58 """Close the connection to this peer.
59
59
60 This is called when the peer will no longer be used. Resources
60 This is called when the peer will no longer be used. Resources
61 associated with the peer should be cleaned up.
61 associated with the peer should be cleaned up.
62 """
62 """
63
63
64 class ipeercapabilities(zi.Interface):
64 class ipeercapabilities(interfaceutil.Interface):
65 """Peer sub-interface related to capabilities."""
65 """Peer sub-interface related to capabilities."""
66
66
67 def capable(name):
67 def capable(name):
68 """Determine support for a named capability.
68 """Determine support for a named capability.
69
69
70 Returns ``False`` if capability not supported.
70 Returns ``False`` if capability not supported.
71
71
72 Returns ``True`` if boolean capability is supported. Returns a string
72 Returns ``True`` if boolean capability is supported. Returns a string
73 if capability support is non-boolean.
73 if capability support is non-boolean.
74
74
75 Capability strings may or may not map to wire protocol capabilities.
75 Capability strings may or may not map to wire protocol capabilities.
76 """
76 """
77
77
78 def requirecap(name, purpose):
78 def requirecap(name, purpose):
79 """Require a capability to be present.
79 """Require a capability to be present.
80
80
81 Raises a ``CapabilityError`` if the capability isn't present.
81 Raises a ``CapabilityError`` if the capability isn't present.
82 """
82 """
83
83
84 class ipeercommands(zi.Interface):
84 class ipeercommands(interfaceutil.Interface):
85 """Client-side interface for communicating over the wire protocol.
85 """Client-side interface for communicating over the wire protocol.
86
86
87 This interface is used as a gateway to the Mercurial wire protocol.
87 This interface is used as a gateway to the Mercurial wire protocol.
88 methods commonly call wire protocol commands of the same name.
88 methods commonly call wire protocol commands of the same name.
89 """
89 """
90
90
91 def branchmap():
91 def branchmap():
92 """Obtain heads in named branches.
92 """Obtain heads in named branches.
93
93
94 Returns a dict mapping branch name to an iterable of nodes that are
94 Returns a dict mapping branch name to an iterable of nodes that are
95 heads on that branch.
95 heads on that branch.
96 """
96 """
97
97
98 def capabilities():
98 def capabilities():
99 """Obtain capabilities of the peer.
99 """Obtain capabilities of the peer.
100
100
101 Returns a set of string capabilities.
101 Returns a set of string capabilities.
102 """
102 """
103
103
104 def clonebundles():
104 def clonebundles():
105 """Obtains the clone bundles manifest for the repo.
105 """Obtains the clone bundles manifest for the repo.
106
106
107 Returns the manifest as unparsed bytes.
107 Returns the manifest as unparsed bytes.
108 """
108 """
109
109
110 def debugwireargs(one, two, three=None, four=None, five=None):
110 def debugwireargs(one, two, three=None, four=None, five=None):
111 """Used to facilitate debugging of arguments passed over the wire."""
111 """Used to facilitate debugging of arguments passed over the wire."""
112
112
113 def getbundle(source, **kwargs):
113 def getbundle(source, **kwargs):
114 """Obtain remote repository data as a bundle.
114 """Obtain remote repository data as a bundle.
115
115
116 This command is how the bulk of repository data is transferred from
116 This command is how the bulk of repository data is transferred from
117 the peer to the local repository
117 the peer to the local repository
118
118
119 Returns a generator of bundle data.
119 Returns a generator of bundle data.
120 """
120 """
121
121
122 def heads():
122 def heads():
123 """Determine all known head revisions in the peer.
123 """Determine all known head revisions in the peer.
124
124
125 Returns an iterable of binary nodes.
125 Returns an iterable of binary nodes.
126 """
126 """
127
127
128 def known(nodes):
128 def known(nodes):
129 """Determine whether multiple nodes are known.
129 """Determine whether multiple nodes are known.
130
130
131 Accepts an iterable of nodes whose presence to check for.
131 Accepts an iterable of nodes whose presence to check for.
132
132
133 Returns an iterable of booleans indicating of the corresponding node
133 Returns an iterable of booleans indicating of the corresponding node
134 at that index is known to the peer.
134 at that index is known to the peer.
135 """
135 """
136
136
137 def listkeys(namespace):
137 def listkeys(namespace):
138 """Obtain all keys in a pushkey namespace.
138 """Obtain all keys in a pushkey namespace.
139
139
140 Returns an iterable of key names.
140 Returns an iterable of key names.
141 """
141 """
142
142
143 def lookup(key):
143 def lookup(key):
144 """Resolve a value to a known revision.
144 """Resolve a value to a known revision.
145
145
146 Returns a binary node of the resolved revision on success.
146 Returns a binary node of the resolved revision on success.
147 """
147 """
148
148
149 def pushkey(namespace, key, old, new):
149 def pushkey(namespace, key, old, new):
150 """Set a value using the ``pushkey`` protocol.
150 """Set a value using the ``pushkey`` protocol.
151
151
152 Arguments correspond to the pushkey namespace and key to operate on and
152 Arguments correspond to the pushkey namespace and key to operate on and
153 the old and new values for that key.
153 the old and new values for that key.
154
154
155 Returns a string with the peer result. The value inside varies by the
155 Returns a string with the peer result. The value inside varies by the
156 namespace.
156 namespace.
157 """
157 """
158
158
159 def stream_out():
159 def stream_out():
160 """Obtain streaming clone data.
160 """Obtain streaming clone data.
161
161
162 Successful result should be a generator of data chunks.
162 Successful result should be a generator of data chunks.
163 """
163 """
164
164
165 def unbundle(bundle, heads, url):
165 def unbundle(bundle, heads, url):
166 """Transfer repository data to the peer.
166 """Transfer repository data to the peer.
167
167
168 This is how the bulk of data during a push is transferred.
168 This is how the bulk of data during a push is transferred.
169
169
170 Returns the integer number of heads added to the peer.
170 Returns the integer number of heads added to the peer.
171 """
171 """
172
172
173 class ipeerlegacycommands(zi.Interface):
173 class ipeerlegacycommands(interfaceutil.Interface):
174 """Interface for implementing support for legacy wire protocol commands.
174 """Interface for implementing support for legacy wire protocol commands.
175
175
176 Wire protocol commands transition to legacy status when they are no longer
176 Wire protocol commands transition to legacy status when they are no longer
177 used by modern clients. To facilitate identifying which commands are
177 used by modern clients. To facilitate identifying which commands are
178 legacy, the interfaces are split.
178 legacy, the interfaces are split.
179 """
179 """
180
180
181 def between(pairs):
181 def between(pairs):
182 """Obtain nodes between pairs of nodes.
182 """Obtain nodes between pairs of nodes.
183
183
184 ``pairs`` is an iterable of node pairs.
184 ``pairs`` is an iterable of node pairs.
185
185
186 Returns an iterable of iterables of nodes corresponding to each
186 Returns an iterable of iterables of nodes corresponding to each
187 requested pair.
187 requested pair.
188 """
188 """
189
189
190 def branches(nodes):
190 def branches(nodes):
191 """Obtain ancestor changesets of specific nodes back to a branch point.
191 """Obtain ancestor changesets of specific nodes back to a branch point.
192
192
193 For each requested node, the peer finds the first ancestor node that is
193 For each requested node, the peer finds the first ancestor node that is
194 a DAG root or is a merge.
194 a DAG root or is a merge.
195
195
196 Returns an iterable of iterables with the resolved values for each node.
196 Returns an iterable of iterables with the resolved values for each node.
197 """
197 """
198
198
199 def changegroup(nodes, source):
199 def changegroup(nodes, source):
200 """Obtain a changegroup with data for descendants of specified nodes."""
200 """Obtain a changegroup with data for descendants of specified nodes."""
201
201
202 def changegroupsubset(bases, heads, source):
202 def changegroupsubset(bases, heads, source):
203 pass
203 pass
204
204
205 class ipeercommandexecutor(zi.Interface):
205 class ipeercommandexecutor(interfaceutil.Interface):
206 """Represents a mechanism to execute remote commands.
206 """Represents a mechanism to execute remote commands.
207
207
208 This is the primary interface for requesting that wire protocol commands
208 This is the primary interface for requesting that wire protocol commands
209 be executed. Instances of this interface are active in a context manager
209 be executed. Instances of this interface are active in a context manager
210 and have a well-defined lifetime. When the context manager exits, all
210 and have a well-defined lifetime. When the context manager exits, all
211 outstanding requests are waited on.
211 outstanding requests are waited on.
212 """
212 """
213
213
214 def callcommand(name, args):
214 def callcommand(name, args):
215 """Request that a named command be executed.
215 """Request that a named command be executed.
216
216
217 Receives the command name and a dictionary of command arguments.
217 Receives the command name and a dictionary of command arguments.
218
218
219 Returns a ``concurrent.futures.Future`` that will resolve to the
219 Returns a ``concurrent.futures.Future`` that will resolve to the
220 result of that command request. That exact value is left up to
220 result of that command request. That exact value is left up to
221 the implementation and possibly varies by command.
221 the implementation and possibly varies by command.
222
222
223 Not all commands can coexist with other commands in an executor
223 Not all commands can coexist with other commands in an executor
224 instance: it depends on the underlying wire protocol transport being
224 instance: it depends on the underlying wire protocol transport being
225 used and the command itself.
225 used and the command itself.
226
226
227 Implementations MAY call ``sendcommands()`` automatically if the
227 Implementations MAY call ``sendcommands()`` automatically if the
228 requested command can not coexist with other commands in this executor.
228 requested command can not coexist with other commands in this executor.
229
229
230 Implementations MAY call ``sendcommands()`` automatically when the
230 Implementations MAY call ``sendcommands()`` automatically when the
231 future's ``result()`` is called. So, consumers using multiple
231 future's ``result()`` is called. So, consumers using multiple
232 commands with an executor MUST ensure that ``result()`` is not called
232 commands with an executor MUST ensure that ``result()`` is not called
233 until all command requests have been issued.
233 until all command requests have been issued.
234 """
234 """
235
235
236 def sendcommands():
236 def sendcommands():
237 """Trigger submission of queued command requests.
237 """Trigger submission of queued command requests.
238
238
239 Not all transports submit commands as soon as they are requested to
239 Not all transports submit commands as soon as they are requested to
240 run. When called, this method forces queued command requests to be
240 run. When called, this method forces queued command requests to be
241 issued. It will no-op if all commands have already been sent.
241 issued. It will no-op if all commands have already been sent.
242
242
243 When called, no more new commands may be issued with this executor.
243 When called, no more new commands may be issued with this executor.
244 """
244 """
245
245
246 def close():
246 def close():
247 """Signal that this command request is finished.
247 """Signal that this command request is finished.
248
248
249 When called, no more new commands may be issued. All outstanding
249 When called, no more new commands may be issued. All outstanding
250 commands that have previously been issued are waited on before
250 commands that have previously been issued are waited on before
251 returning. This not only includes waiting for the futures to resolve,
251 returning. This not only includes waiting for the futures to resolve,
252 but also waiting for all response data to arrive. In other words,
252 but also waiting for all response data to arrive. In other words,
253 calling this waits for all on-wire state for issued command requests
253 calling this waits for all on-wire state for issued command requests
254 to finish.
254 to finish.
255
255
256 When used as a context manager, this method is called when exiting the
256 When used as a context manager, this method is called when exiting the
257 context manager.
257 context manager.
258
258
259 This method may call ``sendcommands()`` if there are buffered commands.
259 This method may call ``sendcommands()`` if there are buffered commands.
260 """
260 """
261
261
262 class ipeerrequests(zi.Interface):
262 class ipeerrequests(interfaceutil.Interface):
263 """Interface for executing commands on a peer."""
263 """Interface for executing commands on a peer."""
264
264
265 def commandexecutor():
265 def commandexecutor():
266 """A context manager that resolves to an ipeercommandexecutor.
266 """A context manager that resolves to an ipeercommandexecutor.
267
267
268 The object this resolves to can be used to issue command requests
268 The object this resolves to can be used to issue command requests
269 to the peer.
269 to the peer.
270
270
271 Callers should call its ``callcommand`` method to issue command
271 Callers should call its ``callcommand`` method to issue command
272 requests.
272 requests.
273
273
274 A new executor should be obtained for each distinct set of commands
274 A new executor should be obtained for each distinct set of commands
275 (possibly just a single command) that the consumer wants to execute
275 (possibly just a single command) that the consumer wants to execute
276 as part of a single operation or round trip. This is because some
276 as part of a single operation or round trip. This is because some
277 peers are half-duplex and/or don't support persistent connections.
277 peers are half-duplex and/or don't support persistent connections.
278 e.g. in the case of HTTP peers, commands sent to an executor represent
278 e.g. in the case of HTTP peers, commands sent to an executor represent
279 a single HTTP request. While some peers may support multiple command
279 a single HTTP request. While some peers may support multiple command
280 sends over the wire per executor, consumers need to code to the least
280 sends over the wire per executor, consumers need to code to the least
281 capable peer. So it should be assumed that command executors buffer
281 capable peer. So it should be assumed that command executors buffer
282 called commands until they are told to send them and that each
282 called commands until they are told to send them and that each
283 command executor could result in a new connection or wire-level request
283 command executor could result in a new connection or wire-level request
284 being issued.
284 being issued.
285 """
285 """
286
286
287 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
287 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
288 """Unified interface for peer repositories.
288 """Unified interface for peer repositories.
289
289
290 All peer instances must conform to this interface.
290 All peer instances must conform to this interface.
291 """
291 """
292
292
293 @zi.implementer(ipeerbase)
293 @interfaceutil.implementer(ipeerbase)
294 class peer(object):
294 class peer(object):
295 """Base class for peer repositories."""
295 """Base class for peer repositories."""
296
296
297 def capable(self, name):
297 def capable(self, name):
298 caps = self.capabilities()
298 caps = self.capabilities()
299 if name in caps:
299 if name in caps:
300 return True
300 return True
301
301
302 name = '%s=' % name
302 name = '%s=' % name
303 for cap in caps:
303 for cap in caps:
304 if cap.startswith(name):
304 if cap.startswith(name):
305 return cap[len(name):]
305 return cap[len(name):]
306
306
307 return False
307 return False
308
308
309 def requirecap(self, name, purpose):
309 def requirecap(self, name, purpose):
310 if self.capable(name):
310 if self.capable(name):
311 return
311 return
312
312
313 raise error.CapabilityError(
313 raise error.CapabilityError(
314 _('cannot %s; remote repository does not support the %r '
314 _('cannot %s; remote repository does not support the %r '
315 'capability') % (purpose, name))
315 'capability') % (purpose, name))
316
316
317 class ifilerevisionssequence(zi.Interface):
317 class ifilerevisionssequence(interfaceutil.Interface):
318 """Contains index data for all revisions of a file.
318 """Contains index data for all revisions of a file.
319
319
320 Types implementing this behave like lists of tuples. The index
320 Types implementing this behave like lists of tuples. The index
321 in the list corresponds to the revision number. The values contain
321 in the list corresponds to the revision number. The values contain
322 index metadata.
322 index metadata.
323
323
324 The *null* revision (revision number -1) is always the last item
324 The *null* revision (revision number -1) is always the last item
325 in the index.
325 in the index.
326 """
326 """
327
327
328 def __len__():
328 def __len__():
329 """The total number of revisions."""
329 """The total number of revisions."""
330
330
331 def __getitem__(rev):
331 def __getitem__(rev):
332 """Returns the object having a specific revision number.
332 """Returns the object having a specific revision number.
333
333
334 Returns an 8-tuple with the following fields:
334 Returns an 8-tuple with the following fields:
335
335
336 offset+flags
336 offset+flags
337 Contains the offset and flags for the revision. 64-bit unsigned
337 Contains the offset and flags for the revision. 64-bit unsigned
338 integer where first 6 bytes are the offset and the next 2 bytes
338 integer where first 6 bytes are the offset and the next 2 bytes
339 are flags. The offset can be 0 if it is not used by the store.
339 are flags. The offset can be 0 if it is not used by the store.
340 compressed size
340 compressed size
341 Size of the revision data in the store. It can be 0 if it isn't
341 Size of the revision data in the store. It can be 0 if it isn't
342 needed by the store.
342 needed by the store.
343 uncompressed size
343 uncompressed size
344 Fulltext size. It can be 0 if it isn't needed by the store.
344 Fulltext size. It can be 0 if it isn't needed by the store.
345 base revision
345 base revision
346 Revision number of revision the delta for storage is encoded
346 Revision number of revision the delta for storage is encoded
347 against. -1 indicates not encoded against a base revision.
347 against. -1 indicates not encoded against a base revision.
348 link revision
348 link revision
349 Revision number of changelog revision this entry is related to.
349 Revision number of changelog revision this entry is related to.
350 p1 revision
350 p1 revision
351 Revision number of 1st parent. -1 if no 1st parent.
351 Revision number of 1st parent. -1 if no 1st parent.
352 p2 revision
352 p2 revision
353 Revision number of 2nd parent. -1 if no 1st parent.
353 Revision number of 2nd parent. -1 if no 1st parent.
354 node
354 node
355 Binary node value for this revision number.
355 Binary node value for this revision number.
356
356
357 Negative values should index off the end of the sequence. ``-1``
357 Negative values should index off the end of the sequence. ``-1``
358 should return the null revision. ``-2`` should return the most
358 should return the null revision. ``-2`` should return the most
359 recent revision.
359 recent revision.
360 """
360 """
361
361
362 def __contains__(rev):
362 def __contains__(rev):
363 """Whether a revision number exists."""
363 """Whether a revision number exists."""
364
364
365 def insert(self, i, entry):
365 def insert(self, i, entry):
366 """Add an item to the index at specific revision."""
366 """Add an item to the index at specific revision."""
367
367
368 class ifileindex(zi.Interface):
368 class ifileindex(interfaceutil.Interface):
369 """Storage interface for index data of a single file.
369 """Storage interface for index data of a single file.
370
370
371 File storage data is divided into index metadata and data storage.
371 File storage data is divided into index metadata and data storage.
372 This interface defines the index portion of the interface.
372 This interface defines the index portion of the interface.
373
373
374 The index logically consists of:
374 The index logically consists of:
375
375
376 * A mapping between revision numbers and nodes.
376 * A mapping between revision numbers and nodes.
377 * DAG data (storing and querying the relationship between nodes).
377 * DAG data (storing and querying the relationship between nodes).
378 * Metadata to facilitate storage.
378 * Metadata to facilitate storage.
379 """
379 """
380 index = zi.Attribute(
380 index = interfaceutil.Attribute(
381 """An ``ifilerevisionssequence`` instance.""")
381 """An ``ifilerevisionssequence`` instance.""")
382
382
383 def __len__():
383 def __len__():
384 """Obtain the number of revisions stored for this file."""
384 """Obtain the number of revisions stored for this file."""
385
385
386 def __iter__():
386 def __iter__():
387 """Iterate over revision numbers for this file."""
387 """Iterate over revision numbers for this file."""
388
388
389 def revs(start=0, stop=None):
389 def revs(start=0, stop=None):
390 """Iterate over revision numbers for this file, with control."""
390 """Iterate over revision numbers for this file, with control."""
391
391
392 def parents(node):
392 def parents(node):
393 """Returns a 2-tuple of parent nodes for a revision.
393 """Returns a 2-tuple of parent nodes for a revision.
394
394
395 Values will be ``nullid`` if the parent is empty.
395 Values will be ``nullid`` if the parent is empty.
396 """
396 """
397
397
398 def parentrevs(rev):
398 def parentrevs(rev):
399 """Like parents() but operates on revision numbers."""
399 """Like parents() but operates on revision numbers."""
400
400
401 def rev(node):
401 def rev(node):
402 """Obtain the revision number given a node.
402 """Obtain the revision number given a node.
403
403
404 Raises ``error.LookupError`` if the node is not known.
404 Raises ``error.LookupError`` if the node is not known.
405 """
405 """
406
406
407 def node(rev):
407 def node(rev):
408 """Obtain the node value given a revision number.
408 """Obtain the node value given a revision number.
409
409
410 Raises ``IndexError`` if the node is not known.
410 Raises ``IndexError`` if the node is not known.
411 """
411 """
412
412
413 def lookup(node):
413 def lookup(node):
414 """Attempt to resolve a value to a node.
414 """Attempt to resolve a value to a node.
415
415
416 Value can be a binary node, hex node, revision number, or a string
416 Value can be a binary node, hex node, revision number, or a string
417 that can be converted to an integer.
417 that can be converted to an integer.
418
418
419 Raises ``error.LookupError`` if a node could not be resolved.
419 Raises ``error.LookupError`` if a node could not be resolved.
420 """
420 """
421
421
422 def linkrev(rev):
422 def linkrev(rev):
423 """Obtain the changeset revision number a revision is linked to."""
423 """Obtain the changeset revision number a revision is linked to."""
424
424
425 def flags(rev):
425 def flags(rev):
426 """Obtain flags used to affect storage of a revision."""
426 """Obtain flags used to affect storage of a revision."""
427
427
428 def iscensored(rev):
428 def iscensored(rev):
429 """Return whether a revision's content has been censored."""
429 """Return whether a revision's content has been censored."""
430
430
431 def commonancestorsheads(node1, node2):
431 def commonancestorsheads(node1, node2):
432 """Obtain an iterable of nodes containing heads of common ancestors.
432 """Obtain an iterable of nodes containing heads of common ancestors.
433
433
434 See ``ancestor.commonancestorsheads()``.
434 See ``ancestor.commonancestorsheads()``.
435 """
435 """
436
436
437 def descendants(revs):
437 def descendants(revs):
438 """Obtain descendant revision numbers for a set of revision numbers.
438 """Obtain descendant revision numbers for a set of revision numbers.
439
439
440 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
440 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
441 """
441 """
442
442
443 def headrevs():
443 def headrevs():
444 """Obtain a list of revision numbers that are DAG heads.
444 """Obtain a list of revision numbers that are DAG heads.
445
445
446 The list is sorted oldest to newest.
446 The list is sorted oldest to newest.
447
447
448 TODO determine if sorting is required.
448 TODO determine if sorting is required.
449 """
449 """
450
450
451 def heads(start=None, stop=None):
451 def heads(start=None, stop=None):
452 """Obtain a list of nodes that are DAG heads, with control.
452 """Obtain a list of nodes that are DAG heads, with control.
453
453
454 The set of revisions examined can be limited by specifying
454 The set of revisions examined can be limited by specifying
455 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
455 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
456 iterable of nodes. DAG traversal starts at earlier revision
456 iterable of nodes. DAG traversal starts at earlier revision
457 ``start`` and iterates forward until any node in ``stop`` is
457 ``start`` and iterates forward until any node in ``stop`` is
458 encountered.
458 encountered.
459 """
459 """
460
460
461 def children(node):
461 def children(node):
462 """Obtain nodes that are children of a node.
462 """Obtain nodes that are children of a node.
463
463
464 Returns a list of nodes.
464 Returns a list of nodes.
465 """
465 """
466
466
467 def deltaparent(rev):
467 def deltaparent(rev):
468 """"Return the revision that is a suitable parent to delta against."""
468 """"Return the revision that is a suitable parent to delta against."""
469
469
470 def candelta(baserev, rev):
470 def candelta(baserev, rev):
471 """"Whether a delta can be generated between two revisions."""
471 """"Whether a delta can be generated between two revisions."""
472
472
473 class ifiledata(zi.Interface):
473 class ifiledata(interfaceutil.Interface):
474 """Storage interface for data storage of a specific file.
474 """Storage interface for data storage of a specific file.
475
475
476 This complements ``ifileindex`` and provides an interface for accessing
476 This complements ``ifileindex`` and provides an interface for accessing
477 data for a tracked file.
477 data for a tracked file.
478 """
478 """
479 def rawsize(rev):
479 def rawsize(rev):
480 """The size of the fulltext data for a revision as stored."""
480 """The size of the fulltext data for a revision as stored."""
481
481
482 def size(rev):
482 def size(rev):
483 """Obtain the fulltext size of file data.
483 """Obtain the fulltext size of file data.
484
484
485 Any metadata is excluded from size measurements. Use ``rawsize()`` if
485 Any metadata is excluded from size measurements. Use ``rawsize()`` if
486 metadata size is important.
486 metadata size is important.
487 """
487 """
488
488
489 def checkhash(fulltext, node, p1=None, p2=None, rev=None):
489 def checkhash(fulltext, node, p1=None, p2=None, rev=None):
490 """Validate the stored hash of a given fulltext and node.
490 """Validate the stored hash of a given fulltext and node.
491
491
492 Raises ``error.RevlogError`` is hash validation fails.
492 Raises ``error.RevlogError`` is hash validation fails.
493 """
493 """
494
494
495 def revision(node, raw=False):
495 def revision(node, raw=False):
496 """"Obtain fulltext data for a node.
496 """"Obtain fulltext data for a node.
497
497
498 By default, any storage transformations are applied before the data
498 By default, any storage transformations are applied before the data
499 is returned. If ``raw`` is True, non-raw storage transformations
499 is returned. If ``raw`` is True, non-raw storage transformations
500 are not applied.
500 are not applied.
501
501
502 The fulltext data may contain a header containing metadata. Most
502 The fulltext data may contain a header containing metadata. Most
503 consumers should use ``read()`` to obtain the actual file data.
503 consumers should use ``read()`` to obtain the actual file data.
504 """
504 """
505
505
506 def read(node):
506 def read(node):
507 """Resolve file fulltext data.
507 """Resolve file fulltext data.
508
508
509 This is similar to ``revision()`` except any metadata in the data
509 This is similar to ``revision()`` except any metadata in the data
510 headers is stripped.
510 headers is stripped.
511 """
511 """
512
512
513 def renamed(node):
513 def renamed(node):
514 """Obtain copy metadata for a node.
514 """Obtain copy metadata for a node.
515
515
516 Returns ``False`` if no copy metadata is stored or a 2-tuple of
516 Returns ``False`` if no copy metadata is stored or a 2-tuple of
517 (path, node) from which this revision was copied.
517 (path, node) from which this revision was copied.
518 """
518 """
519
519
520 def cmp(node, fulltext):
520 def cmp(node, fulltext):
521 """Compare fulltext to another revision.
521 """Compare fulltext to another revision.
522
522
523 Returns True if the fulltext is different from what is stored.
523 Returns True if the fulltext is different from what is stored.
524
524
525 This takes copy metadata into account.
525 This takes copy metadata into account.
526
526
527 TODO better document the copy metadata and censoring logic.
527 TODO better document the copy metadata and censoring logic.
528 """
528 """
529
529
530 def revdiff(rev1, rev2):
530 def revdiff(rev1, rev2):
531 """Obtain a delta between two revision numbers.
531 """Obtain a delta between two revision numbers.
532
532
533 Operates on raw data in the store (``revision(node, raw=True)``).
533 Operates on raw data in the store (``revision(node, raw=True)``).
534
534
535 The returned data is the result of ``bdiff.bdiff`` on the raw
535 The returned data is the result of ``bdiff.bdiff`` on the raw
536 revision data.
536 revision data.
537 """
537 """
538
538
539 class ifilemutation(zi.Interface):
539 class ifilemutation(interfaceutil.Interface):
540 """Storage interface for mutation events of a tracked file."""
540 """Storage interface for mutation events of a tracked file."""
541
541
542 def add(filedata, meta, transaction, linkrev, p1, p2):
542 def add(filedata, meta, transaction, linkrev, p1, p2):
543 """Add a new revision to the store.
543 """Add a new revision to the store.
544
544
545 Takes file data, dictionary of metadata, a transaction, linkrev,
545 Takes file data, dictionary of metadata, a transaction, linkrev,
546 and parent nodes.
546 and parent nodes.
547
547
548 Returns the node that was added.
548 Returns the node that was added.
549
549
550 May no-op if a revision matching the supplied data is already stored.
550 May no-op if a revision matching the supplied data is already stored.
551 """
551 """
552
552
553 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
553 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
554 flags=0, cachedelta=None):
554 flags=0, cachedelta=None):
555 """Add a new revision to the store.
555 """Add a new revision to the store.
556
556
557 This is similar to ``add()`` except it operates at a lower level.
557 This is similar to ``add()`` except it operates at a lower level.
558
558
559 The data passed in already contains a metadata header, if any.
559 The data passed in already contains a metadata header, if any.
560
560
561 ``node`` and ``flags`` can be used to define the expected node and
561 ``node`` and ``flags`` can be used to define the expected node and
562 the flags to use with storage.
562 the flags to use with storage.
563
563
564 ``add()`` is usually called when adding files from e.g. the working
564 ``add()`` is usually called when adding files from e.g. the working
565 directory. ``addrevision()`` is often called by ``add()`` and for
565 directory. ``addrevision()`` is often called by ``add()`` and for
566 scenarios where revision data has already been computed, such as when
566 scenarios where revision data has already been computed, such as when
567 applying raw data from a peer repo.
567 applying raw data from a peer repo.
568 """
568 """
569
569
570 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
570 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
571 """Process a series of deltas for storage.
571 """Process a series of deltas for storage.
572
572
573 ``deltas`` is an iterable of 7-tuples of
573 ``deltas`` is an iterable of 7-tuples of
574 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
574 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
575 to add.
575 to add.
576
576
577 The ``delta`` field contains ``mpatch`` data to apply to a base
577 The ``delta`` field contains ``mpatch`` data to apply to a base
578 revision, identified by ``deltabase``. The base node can be
578 revision, identified by ``deltabase``. The base node can be
579 ``nullid``, in which case the header from the delta can be ignored
579 ``nullid``, in which case the header from the delta can be ignored
580 and the delta used as the fulltext.
580 and the delta used as the fulltext.
581
581
582 ``addrevisioncb`` should be called for each node as it is committed.
582 ``addrevisioncb`` should be called for each node as it is committed.
583
583
584 Returns a list of nodes that were processed. A node will be in the list
584 Returns a list of nodes that were processed. A node will be in the list
585 even if it existed in the store previously.
585 even if it existed in the store previously.
586 """
586 """
587
587
588 def getstrippoint(minlink):
588 def getstrippoint(minlink):
589 """Find the minimum revision that must be stripped to strip a linkrev.
589 """Find the minimum revision that must be stripped to strip a linkrev.
590
590
591 Returns a 2-tuple containing the minimum revision number and a set
591 Returns a 2-tuple containing the minimum revision number and a set
592 of all revisions numbers that would be broken by this strip.
592 of all revisions numbers that would be broken by this strip.
593
593
594 TODO this is highly revlog centric and should be abstracted into
594 TODO this is highly revlog centric and should be abstracted into
595 a higher-level deletion API. ``repair.strip()`` relies on this.
595 a higher-level deletion API. ``repair.strip()`` relies on this.
596 """
596 """
597
597
598 def strip(minlink, transaction):
598 def strip(minlink, transaction):
599 """Remove storage of items starting at a linkrev.
599 """Remove storage of items starting at a linkrev.
600
600
601 This uses ``getstrippoint()`` to determine the first node to remove.
601 This uses ``getstrippoint()`` to determine the first node to remove.
602 Then it effectively truncates storage for all revisions after that.
602 Then it effectively truncates storage for all revisions after that.
603
603
604 TODO this is highly revlog centric and should be abstracted into a
604 TODO this is highly revlog centric and should be abstracted into a
605 higher-level deletion API.
605 higher-level deletion API.
606 """
606 """
607
607
608 class ifilestorage(ifileindex, ifiledata, ifilemutation):
608 class ifilestorage(ifileindex, ifiledata, ifilemutation):
609 """Complete storage interface for a single tracked file."""
609 """Complete storage interface for a single tracked file."""
610
610
611 version = zi.Attribute(
611 version = interfaceutil.Attribute(
612 """Version number of storage.
612 """Version number of storage.
613
613
614 TODO this feels revlog centric and could likely be removed.
614 TODO this feels revlog centric and could likely be removed.
615 """)
615 """)
616
616
617 storedeltachains = zi.Attribute(
617 storedeltachains = interfaceutil.Attribute(
618 """Whether the store stores deltas.
618 """Whether the store stores deltas.
619
619
620 TODO deltachains are revlog centric. This can probably removed
620 TODO deltachains are revlog centric. This can probably removed
621 once there are better abstractions for obtaining/writing
621 once there are better abstractions for obtaining/writing
622 data.
622 data.
623 """)
623 """)
624
624
625 _generaldelta = zi.Attribute(
625 _generaldelta = interfaceutil.Attribute(
626 """Whether deltas can be against any parent revision.
626 """Whether deltas can be against any parent revision.
627
627
628 TODO this is used by changegroup code and it could probably be
628 TODO this is used by changegroup code and it could probably be
629 folded into another API.
629 folded into another API.
630 """)
630 """)
631
631
632 def files():
632 def files():
633 """Obtain paths that are backing storage for this file.
633 """Obtain paths that are backing storage for this file.
634
634
635 TODO this is used heavily by verify code and there should probably
635 TODO this is used heavily by verify code and there should probably
636 be a better API for that.
636 be a better API for that.
637 """
637 """
638
638
639 def checksize():
639 def checksize():
640 """Obtain the expected sizes of backing files.
640 """Obtain the expected sizes of backing files.
641
641
642 TODO this is used by verify and it should not be part of the interface.
642 TODO this is used by verify and it should not be part of the interface.
643 """
643 """
644
644
645 class completelocalrepository(zi.Interface):
645 class completelocalrepository(interfaceutil.Interface):
646 """Monolithic interface for local repositories.
646 """Monolithic interface for local repositories.
647
647
648 This currently captures the reality of things - not how things should be.
648 This currently captures the reality of things - not how things should be.
649 """
649 """
650
650
651 supportedformats = zi.Attribute(
651 supportedformats = interfaceutil.Attribute(
652 """Set of requirements that apply to stream clone.
652 """Set of requirements that apply to stream clone.
653
653
654 This is actually a class attribute and is shared among all instances.
654 This is actually a class attribute and is shared among all instances.
655 """)
655 """)
656
656
657 openerreqs = zi.Attribute(
657 openerreqs = interfaceutil.Attribute(
658 """Set of requirements that are passed to the opener.
658 """Set of requirements that are passed to the opener.
659
659
660 This is actually a class attribute and is shared among all instances.
660 This is actually a class attribute and is shared among all instances.
661 """)
661 """)
662
662
663 supported = zi.Attribute(
663 supported = interfaceutil.Attribute(
664 """Set of requirements that this repo is capable of opening.""")
664 """Set of requirements that this repo is capable of opening.""")
665
665
666 requirements = zi.Attribute(
666 requirements = interfaceutil.Attribute(
667 """Set of requirements this repo uses.""")
667 """Set of requirements this repo uses.""")
668
668
669 filtername = zi.Attribute(
669 filtername = interfaceutil.Attribute(
670 """Name of the repoview that is active on this repo.""")
670 """Name of the repoview that is active on this repo.""")
671
671
672 wvfs = zi.Attribute(
672 wvfs = interfaceutil.Attribute(
673 """VFS used to access the working directory.""")
673 """VFS used to access the working directory.""")
674
674
675 vfs = zi.Attribute(
675 vfs = interfaceutil.Attribute(
676 """VFS rooted at the .hg directory.
676 """VFS rooted at the .hg directory.
677
677
678 Used to access repository data not in the store.
678 Used to access repository data not in the store.
679 """)
679 """)
680
680
681 svfs = zi.Attribute(
681 svfs = interfaceutil.Attribute(
682 """VFS rooted at the store.
682 """VFS rooted at the store.
683
683
684 Used to access repository data in the store. Typically .hg/store.
684 Used to access repository data in the store. Typically .hg/store.
685 But can point elsewhere if the store is shared.
685 But can point elsewhere if the store is shared.
686 """)
686 """)
687
687
688 root = zi.Attribute(
688 root = interfaceutil.Attribute(
689 """Path to the root of the working directory.""")
689 """Path to the root of the working directory.""")
690
690
691 path = zi.Attribute(
691 path = interfaceutil.Attribute(
692 """Path to the .hg directory.""")
692 """Path to the .hg directory.""")
693
693
694 origroot = zi.Attribute(
694 origroot = interfaceutil.Attribute(
695 """The filesystem path that was used to construct the repo.""")
695 """The filesystem path that was used to construct the repo.""")
696
696
697 auditor = zi.Attribute(
697 auditor = interfaceutil.Attribute(
698 """A pathauditor for the working directory.
698 """A pathauditor for the working directory.
699
699
700 This checks if a path refers to a nested repository.
700 This checks if a path refers to a nested repository.
701
701
702 Operates on the filesystem.
702 Operates on the filesystem.
703 """)
703 """)
704
704
705 nofsauditor = zi.Attribute(
705 nofsauditor = interfaceutil.Attribute(
706 """A pathauditor for the working directory.
706 """A pathauditor for the working directory.
707
707
708 This is like ``auditor`` except it doesn't do filesystem checks.
708 This is like ``auditor`` except it doesn't do filesystem checks.
709 """)
709 """)
710
710
711 baseui = zi.Attribute(
711 baseui = interfaceutil.Attribute(
712 """Original ui instance passed into constructor.""")
712 """Original ui instance passed into constructor.""")
713
713
714 ui = zi.Attribute(
714 ui = interfaceutil.Attribute(
715 """Main ui instance for this instance.""")
715 """Main ui instance for this instance.""")
716
716
717 sharedpath = zi.Attribute(
717 sharedpath = interfaceutil.Attribute(
718 """Path to the .hg directory of the repo this repo was shared from.""")
718 """Path to the .hg directory of the repo this repo was shared from.""")
719
719
720 store = zi.Attribute(
720 store = interfaceutil.Attribute(
721 """A store instance.""")
721 """A store instance.""")
722
722
723 spath = zi.Attribute(
723 spath = interfaceutil.Attribute(
724 """Path to the store.""")
724 """Path to the store.""")
725
725
726 sjoin = zi.Attribute(
726 sjoin = interfaceutil.Attribute(
727 """Alias to self.store.join.""")
727 """Alias to self.store.join.""")
728
728
729 cachevfs = zi.Attribute(
729 cachevfs = interfaceutil.Attribute(
730 """A VFS used to access the cache directory.
730 """A VFS used to access the cache directory.
731
731
732 Typically .hg/cache.
732 Typically .hg/cache.
733 """)
733 """)
734
734
735 filteredrevcache = zi.Attribute(
735 filteredrevcache = interfaceutil.Attribute(
736 """Holds sets of revisions to be filtered.""")
736 """Holds sets of revisions to be filtered.""")
737
737
738 names = zi.Attribute(
738 names = interfaceutil.Attribute(
739 """A ``namespaces`` instance.""")
739 """A ``namespaces`` instance.""")
740
740
741 def close():
741 def close():
742 """Close the handle on this repository."""
742 """Close the handle on this repository."""
743
743
744 def peer():
744 def peer():
745 """Obtain an object conforming to the ``peer`` interface."""
745 """Obtain an object conforming to the ``peer`` interface."""
746
746
747 def unfiltered():
747 def unfiltered():
748 """Obtain an unfiltered/raw view of this repo."""
748 """Obtain an unfiltered/raw view of this repo."""
749
749
750 def filtered(name, visibilityexceptions=None):
750 def filtered(name, visibilityexceptions=None):
751 """Obtain a named view of this repository."""
751 """Obtain a named view of this repository."""
752
752
753 obsstore = zi.Attribute(
753 obsstore = interfaceutil.Attribute(
754 """A store of obsolescence data.""")
754 """A store of obsolescence data.""")
755
755
756 changelog = zi.Attribute(
756 changelog = interfaceutil.Attribute(
757 """A handle on the changelog revlog.""")
757 """A handle on the changelog revlog.""")
758
758
759 manifestlog = zi.Attribute(
759 manifestlog = interfaceutil.Attribute(
760 """A handle on the root manifest revlog.""")
760 """A handle on the root manifest revlog.""")
761
761
762 dirstate = zi.Attribute(
762 dirstate = interfaceutil.Attribute(
763 """Working directory state.""")
763 """Working directory state.""")
764
764
765 narrowpats = zi.Attribute(
765 narrowpats = interfaceutil.Attribute(
766 """Matcher patterns for this repository's narrowspec.""")
766 """Matcher patterns for this repository's narrowspec.""")
767
767
768 def narrowmatch():
768 def narrowmatch():
769 """Obtain a matcher for the narrowspec."""
769 """Obtain a matcher for the narrowspec."""
770
770
771 def setnarrowpats(newincludes, newexcludes):
771 def setnarrowpats(newincludes, newexcludes):
772 """Define the narrowspec for this repository."""
772 """Define the narrowspec for this repository."""
773
773
774 def __getitem__(changeid):
774 def __getitem__(changeid):
775 """Try to resolve a changectx."""
775 """Try to resolve a changectx."""
776
776
777 def __contains__(changeid):
777 def __contains__(changeid):
778 """Whether a changeset exists."""
778 """Whether a changeset exists."""
779
779
780 def __nonzero__():
780 def __nonzero__():
781 """Always returns True."""
781 """Always returns True."""
782 return True
782 return True
783
783
784 __bool__ = __nonzero__
784 __bool__ = __nonzero__
785
785
786 def __len__():
786 def __len__():
787 """Returns the number of changesets in the repo."""
787 """Returns the number of changesets in the repo."""
788
788
789 def __iter__():
789 def __iter__():
790 """Iterate over revisions in the changelog."""
790 """Iterate over revisions in the changelog."""
791
791
792 def revs(expr, *args):
792 def revs(expr, *args):
793 """Evaluate a revset.
793 """Evaluate a revset.
794
794
795 Emits revisions.
795 Emits revisions.
796 """
796 """
797
797
798 def set(expr, *args):
798 def set(expr, *args):
799 """Evaluate a revset.
799 """Evaluate a revset.
800
800
801 Emits changectx instances.
801 Emits changectx instances.
802 """
802 """
803
803
804 def anyrevs(specs, user=False, localalias=None):
804 def anyrevs(specs, user=False, localalias=None):
805 """Find revisions matching one of the given revsets."""
805 """Find revisions matching one of the given revsets."""
806
806
807 def url():
807 def url():
808 """Returns a string representing the location of this repo."""
808 """Returns a string representing the location of this repo."""
809
809
810 def hook(name, throw=False, **args):
810 def hook(name, throw=False, **args):
811 """Call a hook."""
811 """Call a hook."""
812
812
813 def tags():
813 def tags():
814 """Return a mapping of tag to node."""
814 """Return a mapping of tag to node."""
815
815
816 def tagtype(tagname):
816 def tagtype(tagname):
817 """Return the type of a given tag."""
817 """Return the type of a given tag."""
818
818
819 def tagslist():
819 def tagslist():
820 """Return a list of tags ordered by revision."""
820 """Return a list of tags ordered by revision."""
821
821
822 def nodetags(node):
822 def nodetags(node):
823 """Return the tags associated with a node."""
823 """Return the tags associated with a node."""
824
824
825 def nodebookmarks(node):
825 def nodebookmarks(node):
826 """Return the list of bookmarks pointing to the specified node."""
826 """Return the list of bookmarks pointing to the specified node."""
827
827
828 def branchmap():
828 def branchmap():
829 """Return a mapping of branch to heads in that branch."""
829 """Return a mapping of branch to heads in that branch."""
830
830
831 def revbranchcache():
831 def revbranchcache():
832 pass
832 pass
833
833
834 def branchtip(branchtip, ignoremissing=False):
834 def branchtip(branchtip, ignoremissing=False):
835 """Return the tip node for a given branch."""
835 """Return the tip node for a given branch."""
836
836
837 def lookup(key):
837 def lookup(key):
838 """Resolve the node for a revision."""
838 """Resolve the node for a revision."""
839
839
840 def lookupbranch(key):
840 def lookupbranch(key):
841 """Look up the branch name of the given revision or branch name."""
841 """Look up the branch name of the given revision or branch name."""
842
842
843 def known(nodes):
843 def known(nodes):
844 """Determine whether a series of nodes is known.
844 """Determine whether a series of nodes is known.
845
845
846 Returns a list of bools.
846 Returns a list of bools.
847 """
847 """
848
848
849 def local():
849 def local():
850 """Whether the repository is local."""
850 """Whether the repository is local."""
851 return True
851 return True
852
852
853 def publishing():
853 def publishing():
854 """Whether the repository is a publishing repository."""
854 """Whether the repository is a publishing repository."""
855
855
856 def cancopy():
856 def cancopy():
857 pass
857 pass
858
858
859 def shared():
859 def shared():
860 """The type of shared repository or None."""
860 """The type of shared repository or None."""
861
861
862 def wjoin(f, *insidef):
862 def wjoin(f, *insidef):
863 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
863 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
864
864
865 def file(f):
865 def file(f):
866 """Obtain a filelog for a tracked path."""
866 """Obtain a filelog for a tracked path."""
867
867
868 def setparents(p1, p2):
868 def setparents(p1, p2):
869 """Set the parent nodes of the working directory."""
869 """Set the parent nodes of the working directory."""
870
870
871 def filectx(path, changeid=None, fileid=None):
871 def filectx(path, changeid=None, fileid=None):
872 """Obtain a filectx for the given file revision."""
872 """Obtain a filectx for the given file revision."""
873
873
874 def getcwd():
874 def getcwd():
875 """Obtain the current working directory from the dirstate."""
875 """Obtain the current working directory from the dirstate."""
876
876
877 def pathto(f, cwd=None):
877 def pathto(f, cwd=None):
878 """Obtain the relative path to a file."""
878 """Obtain the relative path to a file."""
879
879
880 def adddatafilter(name, fltr):
880 def adddatafilter(name, fltr):
881 pass
881 pass
882
882
883 def wread(filename):
883 def wread(filename):
884 """Read a file from wvfs, using data filters."""
884 """Read a file from wvfs, using data filters."""
885
885
886 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
886 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
887 """Write data to a file in the wvfs, using data filters."""
887 """Write data to a file in the wvfs, using data filters."""
888
888
889 def wwritedata(filename, data):
889 def wwritedata(filename, data):
890 """Resolve data for writing to the wvfs, using data filters."""
890 """Resolve data for writing to the wvfs, using data filters."""
891
891
892 def currenttransaction():
892 def currenttransaction():
893 """Obtain the current transaction instance or None."""
893 """Obtain the current transaction instance or None."""
894
894
895 def transaction(desc, report=None):
895 def transaction(desc, report=None):
896 """Open a new transaction to write to the repository."""
896 """Open a new transaction to write to the repository."""
897
897
898 def undofiles():
898 def undofiles():
899 """Returns a list of (vfs, path) for files to undo transactions."""
899 """Returns a list of (vfs, path) for files to undo transactions."""
900
900
901 def recover():
901 def recover():
902 """Roll back an interrupted transaction."""
902 """Roll back an interrupted transaction."""
903
903
904 def rollback(dryrun=False, force=False):
904 def rollback(dryrun=False, force=False):
905 """Undo the last transaction.
905 """Undo the last transaction.
906
906
907 DANGEROUS.
907 DANGEROUS.
908 """
908 """
909
909
910 def updatecaches(tr=None, full=False):
910 def updatecaches(tr=None, full=False):
911 """Warm repo caches."""
911 """Warm repo caches."""
912
912
913 def invalidatecaches():
913 def invalidatecaches():
914 """Invalidate cached data due to the repository mutating."""
914 """Invalidate cached data due to the repository mutating."""
915
915
916 def invalidatevolatilesets():
916 def invalidatevolatilesets():
917 pass
917 pass
918
918
919 def invalidatedirstate():
919 def invalidatedirstate():
920 """Invalidate the dirstate."""
920 """Invalidate the dirstate."""
921
921
922 def invalidate(clearfilecache=False):
922 def invalidate(clearfilecache=False):
923 pass
923 pass
924
924
925 def invalidateall():
925 def invalidateall():
926 pass
926 pass
927
927
928 def lock(wait=True):
928 def lock(wait=True):
929 """Lock the repository store and return a lock instance."""
929 """Lock the repository store and return a lock instance."""
930
930
931 def wlock(wait=True):
931 def wlock(wait=True):
932 """Lock the non-store parts of the repository."""
932 """Lock the non-store parts of the repository."""
933
933
934 def currentwlock():
934 def currentwlock():
935 """Return the wlock if it's held or None."""
935 """Return the wlock if it's held or None."""
936
936
937 def checkcommitpatterns(wctx, vdirs, match, status, fail):
937 def checkcommitpatterns(wctx, vdirs, match, status, fail):
938 pass
938 pass
939
939
940 def commit(text='', user=None, date=None, match=None, force=False,
940 def commit(text='', user=None, date=None, match=None, force=False,
941 editor=False, extra=None):
941 editor=False, extra=None):
942 """Add a new revision to the repository."""
942 """Add a new revision to the repository."""
943
943
944 def commitctx(ctx, error=False):
944 def commitctx(ctx, error=False):
945 """Commit a commitctx instance to the repository."""
945 """Commit a commitctx instance to the repository."""
946
946
947 def destroying():
947 def destroying():
948 """Inform the repository that nodes are about to be destroyed."""
948 """Inform the repository that nodes are about to be destroyed."""
949
949
950 def destroyed():
950 def destroyed():
951 """Inform the repository that nodes have been destroyed."""
951 """Inform the repository that nodes have been destroyed."""
952
952
953 def status(node1='.', node2=None, match=None, ignored=False,
953 def status(node1='.', node2=None, match=None, ignored=False,
954 clean=False, unknown=False, listsubrepos=False):
954 clean=False, unknown=False, listsubrepos=False):
955 """Convenience method to call repo[x].status()."""
955 """Convenience method to call repo[x].status()."""
956
956
957 def addpostdsstatus(ps):
957 def addpostdsstatus(ps):
958 pass
958 pass
959
959
960 def postdsstatus():
960 def postdsstatus():
961 pass
961 pass
962
962
963 def clearpostdsstatus():
963 def clearpostdsstatus():
964 pass
964 pass
965
965
966 def heads(start=None):
966 def heads(start=None):
967 """Obtain list of nodes that are DAG heads."""
967 """Obtain list of nodes that are DAG heads."""
968
968
969 def branchheads(branch=None, start=None, closed=False):
969 def branchheads(branch=None, start=None, closed=False):
970 pass
970 pass
971
971
972 def branches(nodes):
972 def branches(nodes):
973 pass
973 pass
974
974
975 def between(pairs):
975 def between(pairs):
976 pass
976 pass
977
977
978 def checkpush(pushop):
978 def checkpush(pushop):
979 pass
979 pass
980
980
981 prepushoutgoinghooks = zi.Attribute(
981 prepushoutgoinghooks = interfaceutil.Attribute(
982 """util.hooks instance.""")
982 """util.hooks instance.""")
983
983
984 def pushkey(namespace, key, old, new):
984 def pushkey(namespace, key, old, new):
985 pass
985 pass
986
986
987 def listkeys(namespace):
987 def listkeys(namespace):
988 pass
988 pass
989
989
990 def debugwireargs(one, two, three=None, four=None, five=None):
990 def debugwireargs(one, two, three=None, four=None, five=None):
991 pass
991 pass
992
992
993 def savecommitmessage(text):
993 def savecommitmessage(text):
994 pass
994 pass
@@ -1,813 +1,811 b''
1 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
1 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
2 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
2 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 #
3 #
4 # This software may be used and distributed according to the terms of the
4 # This software may be used and distributed according to the terms of the
5 # GNU General Public License version 2 or any later version.
5 # GNU General Public License version 2 or any later version.
6
6
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import contextlib
9 import contextlib
10 import struct
10 import struct
11 import sys
11 import sys
12 import threading
12 import threading
13
13
14 from .i18n import _
14 from .i18n import _
15 from .thirdparty import (
15 from .thirdparty import (
16 cbor,
16 cbor,
17 )
17 )
18 from .thirdparty.zope import (
19 interface as zi,
20 )
21 from . import (
18 from . import (
22 encoding,
19 encoding,
23 error,
20 error,
24 hook,
21 hook,
25 pycompat,
22 pycompat,
26 util,
23 util,
27 wireprototypes,
24 wireprototypes,
28 wireprotov1server,
25 wireprotov1server,
29 wireprotov2server,
26 wireprotov2server,
30 )
27 )
31 from .utils import (
28 from .utils import (
29 interfaceutil,
32 procutil,
30 procutil,
33 )
31 )
34
32
35 stringio = util.stringio
33 stringio = util.stringio
36
34
37 urlerr = util.urlerr
35 urlerr = util.urlerr
38 urlreq = util.urlreq
36 urlreq = util.urlreq
39
37
40 HTTP_OK = 200
38 HTTP_OK = 200
41
39
42 HGTYPE = 'application/mercurial-0.1'
40 HGTYPE = 'application/mercurial-0.1'
43 HGTYPE2 = 'application/mercurial-0.2'
41 HGTYPE2 = 'application/mercurial-0.2'
44 HGERRTYPE = 'application/hg-error'
42 HGERRTYPE = 'application/hg-error'
45
43
46 SSHV1 = wireprototypes.SSHV1
44 SSHV1 = wireprototypes.SSHV1
47 SSHV2 = wireprototypes.SSHV2
45 SSHV2 = wireprototypes.SSHV2
48
46
49 def decodevaluefromheaders(req, headerprefix):
47 def decodevaluefromheaders(req, headerprefix):
50 """Decode a long value from multiple HTTP request headers.
48 """Decode a long value from multiple HTTP request headers.
51
49
52 Returns the value as a bytes, not a str.
50 Returns the value as a bytes, not a str.
53 """
51 """
54 chunks = []
52 chunks = []
55 i = 1
53 i = 1
56 while True:
54 while True:
57 v = req.headers.get(b'%s-%d' % (headerprefix, i))
55 v = req.headers.get(b'%s-%d' % (headerprefix, i))
58 if v is None:
56 if v is None:
59 break
57 break
60 chunks.append(pycompat.bytesurl(v))
58 chunks.append(pycompat.bytesurl(v))
61 i += 1
59 i += 1
62
60
63 return ''.join(chunks)
61 return ''.join(chunks)
64
62
65 @zi.implementer(wireprototypes.baseprotocolhandler)
63 @interfaceutil.implementer(wireprototypes.baseprotocolhandler)
66 class httpv1protocolhandler(object):
64 class httpv1protocolhandler(object):
67 def __init__(self, req, ui, checkperm):
65 def __init__(self, req, ui, checkperm):
68 self._req = req
66 self._req = req
69 self._ui = ui
67 self._ui = ui
70 self._checkperm = checkperm
68 self._checkperm = checkperm
71 self._protocaps = None
69 self._protocaps = None
72
70
73 @property
71 @property
74 def name(self):
72 def name(self):
75 return 'http-v1'
73 return 'http-v1'
76
74
77 def getargs(self, args):
75 def getargs(self, args):
78 knownargs = self._args()
76 knownargs = self._args()
79 data = {}
77 data = {}
80 keys = args.split()
78 keys = args.split()
81 for k in keys:
79 for k in keys:
82 if k == '*':
80 if k == '*':
83 star = {}
81 star = {}
84 for key in knownargs.keys():
82 for key in knownargs.keys():
85 if key != 'cmd' and key not in keys:
83 if key != 'cmd' and key not in keys:
86 star[key] = knownargs[key][0]
84 star[key] = knownargs[key][0]
87 data['*'] = star
85 data['*'] = star
88 else:
86 else:
89 data[k] = knownargs[k][0]
87 data[k] = knownargs[k][0]
90 return [data[k] for k in keys]
88 return [data[k] for k in keys]
91
89
92 def _args(self):
90 def _args(self):
93 args = self._req.qsparams.asdictoflists()
91 args = self._req.qsparams.asdictoflists()
94 postlen = int(self._req.headers.get(b'X-HgArgs-Post', 0))
92 postlen = int(self._req.headers.get(b'X-HgArgs-Post', 0))
95 if postlen:
93 if postlen:
96 args.update(urlreq.parseqs(
94 args.update(urlreq.parseqs(
97 self._req.bodyfh.read(postlen), keep_blank_values=True))
95 self._req.bodyfh.read(postlen), keep_blank_values=True))
98 return args
96 return args
99
97
100 argvalue = decodevaluefromheaders(self._req, b'X-HgArg')
98 argvalue = decodevaluefromheaders(self._req, b'X-HgArg')
101 args.update(urlreq.parseqs(argvalue, keep_blank_values=True))
99 args.update(urlreq.parseqs(argvalue, keep_blank_values=True))
102 return args
100 return args
103
101
104 def getprotocaps(self):
102 def getprotocaps(self):
105 if self._protocaps is None:
103 if self._protocaps is None:
106 value = decodevaluefromheaders(self._req, b'X-HgProto')
104 value = decodevaluefromheaders(self._req, b'X-HgProto')
107 self._protocaps = set(value.split(' '))
105 self._protocaps = set(value.split(' '))
108 return self._protocaps
106 return self._protocaps
109
107
110 def getpayload(self):
108 def getpayload(self):
111 # Existing clients *always* send Content-Length.
109 # Existing clients *always* send Content-Length.
112 length = int(self._req.headers[b'Content-Length'])
110 length = int(self._req.headers[b'Content-Length'])
113
111
114 # If httppostargs is used, we need to read Content-Length
112 # If httppostargs is used, we need to read Content-Length
115 # minus the amount that was consumed by args.
113 # minus the amount that was consumed by args.
116 length -= int(self._req.headers.get(b'X-HgArgs-Post', 0))
114 length -= int(self._req.headers.get(b'X-HgArgs-Post', 0))
117 return util.filechunkiter(self._req.bodyfh, limit=length)
115 return util.filechunkiter(self._req.bodyfh, limit=length)
118
116
119 @contextlib.contextmanager
117 @contextlib.contextmanager
120 def mayberedirectstdio(self):
118 def mayberedirectstdio(self):
121 oldout = self._ui.fout
119 oldout = self._ui.fout
122 olderr = self._ui.ferr
120 olderr = self._ui.ferr
123
121
124 out = util.stringio()
122 out = util.stringio()
125
123
126 try:
124 try:
127 self._ui.fout = out
125 self._ui.fout = out
128 self._ui.ferr = out
126 self._ui.ferr = out
129 yield out
127 yield out
130 finally:
128 finally:
131 self._ui.fout = oldout
129 self._ui.fout = oldout
132 self._ui.ferr = olderr
130 self._ui.ferr = olderr
133
131
134 def client(self):
132 def client(self):
135 return 'remote:%s:%s:%s' % (
133 return 'remote:%s:%s:%s' % (
136 self._req.urlscheme,
134 self._req.urlscheme,
137 urlreq.quote(self._req.remotehost or ''),
135 urlreq.quote(self._req.remotehost or ''),
138 urlreq.quote(self._req.remoteuser or ''))
136 urlreq.quote(self._req.remoteuser or ''))
139
137
140 def addcapabilities(self, repo, caps):
138 def addcapabilities(self, repo, caps):
141 caps.append(b'batch')
139 caps.append(b'batch')
142
140
143 caps.append('httpheader=%d' %
141 caps.append('httpheader=%d' %
144 repo.ui.configint('server', 'maxhttpheaderlen'))
142 repo.ui.configint('server', 'maxhttpheaderlen'))
145 if repo.ui.configbool('experimental', 'httppostargs'):
143 if repo.ui.configbool('experimental', 'httppostargs'):
146 caps.append('httppostargs')
144 caps.append('httppostargs')
147
145
148 # FUTURE advertise 0.2rx once support is implemented
146 # FUTURE advertise 0.2rx once support is implemented
149 # FUTURE advertise minrx and mintx after consulting config option
147 # FUTURE advertise minrx and mintx after consulting config option
150 caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')
148 caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')
151
149
152 compengines = wireprototypes.supportedcompengines(repo.ui,
150 compengines = wireprototypes.supportedcompengines(repo.ui,
153 util.SERVERROLE)
151 util.SERVERROLE)
154 if compengines:
152 if compengines:
155 comptypes = ','.join(urlreq.quote(e.wireprotosupport().name)
153 comptypes = ','.join(urlreq.quote(e.wireprotosupport().name)
156 for e in compengines)
154 for e in compengines)
157 caps.append('compression=%s' % comptypes)
155 caps.append('compression=%s' % comptypes)
158
156
159 return caps
157 return caps
160
158
161 def checkperm(self, perm):
159 def checkperm(self, perm):
162 return self._checkperm(perm)
160 return self._checkperm(perm)
163
161
164 # This method exists mostly so that extensions like remotefilelog can
162 # This method exists mostly so that extensions like remotefilelog can
165 # disable a kludgey legacy method only over http. As of early 2018,
163 # disable a kludgey legacy method only over http. As of early 2018,
166 # there are no other known users, so with any luck we can discard this
164 # there are no other known users, so with any luck we can discard this
167 # hook if remotefilelog becomes a first-party extension.
165 # hook if remotefilelog becomes a first-party extension.
168 def iscmd(cmd):
166 def iscmd(cmd):
169 return cmd in wireprotov1server.commands
167 return cmd in wireprotov1server.commands
170
168
171 def handlewsgirequest(rctx, req, res, checkperm):
169 def handlewsgirequest(rctx, req, res, checkperm):
172 """Possibly process a wire protocol request.
170 """Possibly process a wire protocol request.
173
171
174 If the current request is a wire protocol request, the request is
172 If the current request is a wire protocol request, the request is
175 processed by this function.
173 processed by this function.
176
174
177 ``req`` is a ``parsedrequest`` instance.
175 ``req`` is a ``parsedrequest`` instance.
178 ``res`` is a ``wsgiresponse`` instance.
176 ``res`` is a ``wsgiresponse`` instance.
179
177
180 Returns a bool indicating if the request was serviced. If set, the caller
178 Returns a bool indicating if the request was serviced. If set, the caller
181 should stop processing the request, as a response has already been issued.
179 should stop processing the request, as a response has already been issued.
182 """
180 """
183 # Avoid cycle involving hg module.
181 # Avoid cycle involving hg module.
184 from .hgweb import common as hgwebcommon
182 from .hgweb import common as hgwebcommon
185
183
186 repo = rctx.repo
184 repo = rctx.repo
187
185
188 # HTTP version 1 wire protocol requests are denoted by a "cmd" query
186 # HTTP version 1 wire protocol requests are denoted by a "cmd" query
189 # string parameter. If it isn't present, this isn't a wire protocol
187 # string parameter. If it isn't present, this isn't a wire protocol
190 # request.
188 # request.
191 if 'cmd' not in req.qsparams:
189 if 'cmd' not in req.qsparams:
192 return False
190 return False
193
191
194 cmd = req.qsparams['cmd']
192 cmd = req.qsparams['cmd']
195
193
196 # The "cmd" request parameter is used by both the wire protocol and hgweb.
194 # The "cmd" request parameter is used by both the wire protocol and hgweb.
197 # While not all wire protocol commands are available for all transports,
195 # While not all wire protocol commands are available for all transports,
198 # if we see a "cmd" value that resembles a known wire protocol command, we
196 # if we see a "cmd" value that resembles a known wire protocol command, we
199 # route it to a protocol handler. This is better than routing possible
197 # route it to a protocol handler. This is better than routing possible
200 # wire protocol requests to hgweb because it prevents hgweb from using
198 # wire protocol requests to hgweb because it prevents hgweb from using
201 # known wire protocol commands and it is less confusing for machine
199 # known wire protocol commands and it is less confusing for machine
202 # clients.
200 # clients.
203 if not iscmd(cmd):
201 if not iscmd(cmd):
204 return False
202 return False
205
203
206 # The "cmd" query string argument is only valid on the root path of the
204 # The "cmd" query string argument is only valid on the root path of the
207 # repo. e.g. ``/?cmd=foo``, ``/repo?cmd=foo``. URL paths within the repo
205 # repo. e.g. ``/?cmd=foo``, ``/repo?cmd=foo``. URL paths within the repo
208 # like ``/blah?cmd=foo`` are not allowed. So don't recognize the request
206 # like ``/blah?cmd=foo`` are not allowed. So don't recognize the request
209 # in this case. We send an HTTP 404 for backwards compatibility reasons.
207 # in this case. We send an HTTP 404 for backwards compatibility reasons.
210 if req.dispatchpath:
208 if req.dispatchpath:
211 res.status = hgwebcommon.statusmessage(404)
209 res.status = hgwebcommon.statusmessage(404)
212 res.headers['Content-Type'] = HGTYPE
210 res.headers['Content-Type'] = HGTYPE
213 # TODO This is not a good response to issue for this request. This
211 # TODO This is not a good response to issue for this request. This
214 # is mostly for BC for now.
212 # is mostly for BC for now.
215 res.setbodybytes('0\n%s\n' % b'Not Found')
213 res.setbodybytes('0\n%s\n' % b'Not Found')
216 return True
214 return True
217
215
218 proto = httpv1protocolhandler(req, repo.ui,
216 proto = httpv1protocolhandler(req, repo.ui,
219 lambda perm: checkperm(rctx, req, perm))
217 lambda perm: checkperm(rctx, req, perm))
220
218
221 # The permissions checker should be the only thing that can raise an
219 # The permissions checker should be the only thing that can raise an
222 # ErrorResponse. It is kind of a layer violation to catch an hgweb
220 # ErrorResponse. It is kind of a layer violation to catch an hgweb
223 # exception here. So consider refactoring into a exception type that
221 # exception here. So consider refactoring into a exception type that
224 # is associated with the wire protocol.
222 # is associated with the wire protocol.
225 try:
223 try:
226 _callhttp(repo, req, res, proto, cmd)
224 _callhttp(repo, req, res, proto, cmd)
227 except hgwebcommon.ErrorResponse as e:
225 except hgwebcommon.ErrorResponse as e:
228 for k, v in e.headers:
226 for k, v in e.headers:
229 res.headers[k] = v
227 res.headers[k] = v
230 res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
228 res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
231 # TODO This response body assumes the failed command was
229 # TODO This response body assumes the failed command was
232 # "unbundle." That assumption is not always valid.
230 # "unbundle." That assumption is not always valid.
233 res.setbodybytes('0\n%s\n' % pycompat.bytestr(e))
231 res.setbodybytes('0\n%s\n' % pycompat.bytestr(e))
234
232
235 return True
233 return True
236
234
237 def _availableapis(repo):
235 def _availableapis(repo):
238 apis = set()
236 apis = set()
239
237
240 # Registered APIs are made available via config options of the name of
238 # Registered APIs are made available via config options of the name of
241 # the protocol.
239 # the protocol.
242 for k, v in API_HANDLERS.items():
240 for k, v in API_HANDLERS.items():
243 section, option = v['config']
241 section, option = v['config']
244 if repo.ui.configbool(section, option):
242 if repo.ui.configbool(section, option):
245 apis.add(k)
243 apis.add(k)
246
244
247 return apis
245 return apis
248
246
249 def handlewsgiapirequest(rctx, req, res, checkperm):
247 def handlewsgiapirequest(rctx, req, res, checkperm):
250 """Handle requests to /api/*."""
248 """Handle requests to /api/*."""
251 assert req.dispatchparts[0] == b'api'
249 assert req.dispatchparts[0] == b'api'
252
250
253 repo = rctx.repo
251 repo = rctx.repo
254
252
255 # This whole URL space is experimental for now. But we want to
253 # This whole URL space is experimental for now. But we want to
256 # reserve the URL space. So, 404 all URLs if the feature isn't enabled.
254 # reserve the URL space. So, 404 all URLs if the feature isn't enabled.
257 if not repo.ui.configbool('experimental', 'web.apiserver'):
255 if not repo.ui.configbool('experimental', 'web.apiserver'):
258 res.status = b'404 Not Found'
256 res.status = b'404 Not Found'
259 res.headers[b'Content-Type'] = b'text/plain'
257 res.headers[b'Content-Type'] = b'text/plain'
260 res.setbodybytes(_('Experimental API server endpoint not enabled'))
258 res.setbodybytes(_('Experimental API server endpoint not enabled'))
261 return
259 return
262
260
263 # The URL space is /api/<protocol>/*. The structure of URLs under varies
261 # The URL space is /api/<protocol>/*. The structure of URLs under varies
264 # by <protocol>.
262 # by <protocol>.
265
263
266 availableapis = _availableapis(repo)
264 availableapis = _availableapis(repo)
267
265
268 # Requests to /api/ list available APIs.
266 # Requests to /api/ list available APIs.
269 if req.dispatchparts == [b'api']:
267 if req.dispatchparts == [b'api']:
270 res.status = b'200 OK'
268 res.status = b'200 OK'
271 res.headers[b'Content-Type'] = b'text/plain'
269 res.headers[b'Content-Type'] = b'text/plain'
272 lines = [_('APIs can be accessed at /api/<name>, where <name> can be '
270 lines = [_('APIs can be accessed at /api/<name>, where <name> can be '
273 'one of the following:\n')]
271 'one of the following:\n')]
274 if availableapis:
272 if availableapis:
275 lines.extend(sorted(availableapis))
273 lines.extend(sorted(availableapis))
276 else:
274 else:
277 lines.append(_('(no available APIs)\n'))
275 lines.append(_('(no available APIs)\n'))
278 res.setbodybytes(b'\n'.join(lines))
276 res.setbodybytes(b'\n'.join(lines))
279 return
277 return
280
278
281 proto = req.dispatchparts[1]
279 proto = req.dispatchparts[1]
282
280
283 if proto not in API_HANDLERS:
281 if proto not in API_HANDLERS:
284 res.status = b'404 Not Found'
282 res.status = b'404 Not Found'
285 res.headers[b'Content-Type'] = b'text/plain'
283 res.headers[b'Content-Type'] = b'text/plain'
286 res.setbodybytes(_('Unknown API: %s\nKnown APIs: %s') % (
284 res.setbodybytes(_('Unknown API: %s\nKnown APIs: %s') % (
287 proto, b', '.join(sorted(availableapis))))
285 proto, b', '.join(sorted(availableapis))))
288 return
286 return
289
287
290 if proto not in availableapis:
288 if proto not in availableapis:
291 res.status = b'404 Not Found'
289 res.status = b'404 Not Found'
292 res.headers[b'Content-Type'] = b'text/plain'
290 res.headers[b'Content-Type'] = b'text/plain'
293 res.setbodybytes(_('API %s not enabled\n') % proto)
291 res.setbodybytes(_('API %s not enabled\n') % proto)
294 return
292 return
295
293
296 API_HANDLERS[proto]['handler'](rctx, req, res, checkperm,
294 API_HANDLERS[proto]['handler'](rctx, req, res, checkperm,
297 req.dispatchparts[2:])
295 req.dispatchparts[2:])
298
296
299 # Maps API name to metadata so custom API can be registered.
297 # Maps API name to metadata so custom API can be registered.
300 # Keys are:
298 # Keys are:
301 #
299 #
302 # config
300 # config
303 # Config option that controls whether service is enabled.
301 # Config option that controls whether service is enabled.
304 # handler
302 # handler
305 # Callable receiving (rctx, req, res, checkperm, urlparts) that is called
303 # Callable receiving (rctx, req, res, checkperm, urlparts) that is called
306 # when a request to this API is received.
304 # when a request to this API is received.
307 # apidescriptor
305 # apidescriptor
308 # Callable receiving (req, repo) that is called to obtain an API
306 # Callable receiving (req, repo) that is called to obtain an API
309 # descriptor for this service. The response must be serializable to CBOR.
307 # descriptor for this service. The response must be serializable to CBOR.
310 API_HANDLERS = {
308 API_HANDLERS = {
311 wireprotov2server.HTTP_WIREPROTO_V2: {
309 wireprotov2server.HTTP_WIREPROTO_V2: {
312 'config': ('experimental', 'web.api.http-v2'),
310 'config': ('experimental', 'web.api.http-v2'),
313 'handler': wireprotov2server.handlehttpv2request,
311 'handler': wireprotov2server.handlehttpv2request,
314 'apidescriptor': wireprotov2server.httpv2apidescriptor,
312 'apidescriptor': wireprotov2server.httpv2apidescriptor,
315 },
313 },
316 }
314 }
317
315
318 def _httpresponsetype(ui, proto, prefer_uncompressed):
316 def _httpresponsetype(ui, proto, prefer_uncompressed):
319 """Determine the appropriate response type and compression settings.
317 """Determine the appropriate response type and compression settings.
320
318
321 Returns a tuple of (mediatype, compengine, engineopts).
319 Returns a tuple of (mediatype, compengine, engineopts).
322 """
320 """
323 # Determine the response media type and compression engine based
321 # Determine the response media type and compression engine based
324 # on the request parameters.
322 # on the request parameters.
325
323
326 if '0.2' in proto.getprotocaps():
324 if '0.2' in proto.getprotocaps():
327 # All clients are expected to support uncompressed data.
325 # All clients are expected to support uncompressed data.
328 if prefer_uncompressed:
326 if prefer_uncompressed:
329 return HGTYPE2, util._noopengine(), {}
327 return HGTYPE2, util._noopengine(), {}
330
328
331 # Now find an agreed upon compression format.
329 # Now find an agreed upon compression format.
332 compformats = wireprotov1server.clientcompressionsupport(proto)
330 compformats = wireprotov1server.clientcompressionsupport(proto)
333 for engine in wireprototypes.supportedcompengines(ui, util.SERVERROLE):
331 for engine in wireprototypes.supportedcompengines(ui, util.SERVERROLE):
334 if engine.wireprotosupport().name in compformats:
332 if engine.wireprotosupport().name in compformats:
335 opts = {}
333 opts = {}
336 level = ui.configint('server', '%slevel' % engine.name())
334 level = ui.configint('server', '%slevel' % engine.name())
337 if level is not None:
335 if level is not None:
338 opts['level'] = level
336 opts['level'] = level
339
337
340 return HGTYPE2, engine, opts
338 return HGTYPE2, engine, opts
341
339
342 # No mutually supported compression format. Fall back to the
340 # No mutually supported compression format. Fall back to the
343 # legacy protocol.
341 # legacy protocol.
344
342
345 # Don't allow untrusted settings because disabling compression or
343 # Don't allow untrusted settings because disabling compression or
346 # setting a very high compression level could lead to flooding
344 # setting a very high compression level could lead to flooding
347 # the server's network or CPU.
345 # the server's network or CPU.
348 opts = {'level': ui.configint('server', 'zliblevel')}
346 opts = {'level': ui.configint('server', 'zliblevel')}
349 return HGTYPE, util.compengines['zlib'], opts
347 return HGTYPE, util.compengines['zlib'], opts
350
348
351 def processcapabilitieshandshake(repo, req, res, proto):
349 def processcapabilitieshandshake(repo, req, res, proto):
352 """Called during a ?cmd=capabilities request.
350 """Called during a ?cmd=capabilities request.
353
351
354 If the client is advertising support for a newer protocol, we send
352 If the client is advertising support for a newer protocol, we send
355 a CBOR response with information about available services. If no
353 a CBOR response with information about available services. If no
356 advertised services are available, we don't handle the request.
354 advertised services are available, we don't handle the request.
357 """
355 """
358 # Fall back to old behavior unless the API server is enabled.
356 # Fall back to old behavior unless the API server is enabled.
359 if not repo.ui.configbool('experimental', 'web.apiserver'):
357 if not repo.ui.configbool('experimental', 'web.apiserver'):
360 return False
358 return False
361
359
362 clientapis = decodevaluefromheaders(req, b'X-HgUpgrade')
360 clientapis = decodevaluefromheaders(req, b'X-HgUpgrade')
363 protocaps = decodevaluefromheaders(req, b'X-HgProto')
361 protocaps = decodevaluefromheaders(req, b'X-HgProto')
364 if not clientapis or not protocaps:
362 if not clientapis or not protocaps:
365 return False
363 return False
366
364
367 # We currently only support CBOR responses.
365 # We currently only support CBOR responses.
368 protocaps = set(protocaps.split(' '))
366 protocaps = set(protocaps.split(' '))
369 if b'cbor' not in protocaps:
367 if b'cbor' not in protocaps:
370 return False
368 return False
371
369
372 descriptors = {}
370 descriptors = {}
373
371
374 for api in sorted(set(clientapis.split()) & _availableapis(repo)):
372 for api in sorted(set(clientapis.split()) & _availableapis(repo)):
375 handler = API_HANDLERS[api]
373 handler = API_HANDLERS[api]
376
374
377 descriptorfn = handler.get('apidescriptor')
375 descriptorfn = handler.get('apidescriptor')
378 if not descriptorfn:
376 if not descriptorfn:
379 continue
377 continue
380
378
381 descriptors[api] = descriptorfn(req, repo)
379 descriptors[api] = descriptorfn(req, repo)
382
380
383 v1caps = wireprotov1server.dispatch(repo, proto, 'capabilities')
381 v1caps = wireprotov1server.dispatch(repo, proto, 'capabilities')
384 assert isinstance(v1caps, wireprototypes.bytesresponse)
382 assert isinstance(v1caps, wireprototypes.bytesresponse)
385
383
386 m = {
384 m = {
387 # TODO allow this to be configurable.
385 # TODO allow this to be configurable.
388 'apibase': 'api/',
386 'apibase': 'api/',
389 'apis': descriptors,
387 'apis': descriptors,
390 'v1capabilities': v1caps.data,
388 'v1capabilities': v1caps.data,
391 }
389 }
392
390
393 res.status = b'200 OK'
391 res.status = b'200 OK'
394 res.headers[b'Content-Type'] = b'application/mercurial-cbor'
392 res.headers[b'Content-Type'] = b'application/mercurial-cbor'
395 res.setbodybytes(cbor.dumps(m, canonical=True))
393 res.setbodybytes(cbor.dumps(m, canonical=True))
396
394
397 return True
395 return True
398
396
399 def _callhttp(repo, req, res, proto, cmd):
397 def _callhttp(repo, req, res, proto, cmd):
400 # Avoid cycle involving hg module.
398 # Avoid cycle involving hg module.
401 from .hgweb import common as hgwebcommon
399 from .hgweb import common as hgwebcommon
402
400
403 def genversion2(gen, engine, engineopts):
401 def genversion2(gen, engine, engineopts):
404 # application/mercurial-0.2 always sends a payload header
402 # application/mercurial-0.2 always sends a payload header
405 # identifying the compression engine.
403 # identifying the compression engine.
406 name = engine.wireprotosupport().name
404 name = engine.wireprotosupport().name
407 assert 0 < len(name) < 256
405 assert 0 < len(name) < 256
408 yield struct.pack('B', len(name))
406 yield struct.pack('B', len(name))
409 yield name
407 yield name
410
408
411 for chunk in gen:
409 for chunk in gen:
412 yield chunk
410 yield chunk
413
411
414 def setresponse(code, contenttype, bodybytes=None, bodygen=None):
412 def setresponse(code, contenttype, bodybytes=None, bodygen=None):
415 if code == HTTP_OK:
413 if code == HTTP_OK:
416 res.status = '200 Script output follows'
414 res.status = '200 Script output follows'
417 else:
415 else:
418 res.status = hgwebcommon.statusmessage(code)
416 res.status = hgwebcommon.statusmessage(code)
419
417
420 res.headers['Content-Type'] = contenttype
418 res.headers['Content-Type'] = contenttype
421
419
422 if bodybytes is not None:
420 if bodybytes is not None:
423 res.setbodybytes(bodybytes)
421 res.setbodybytes(bodybytes)
424 if bodygen is not None:
422 if bodygen is not None:
425 res.setbodygen(bodygen)
423 res.setbodygen(bodygen)
426
424
427 if not wireprotov1server.commands.commandavailable(cmd, proto):
425 if not wireprotov1server.commands.commandavailable(cmd, proto):
428 setresponse(HTTP_OK, HGERRTYPE,
426 setresponse(HTTP_OK, HGERRTYPE,
429 _('requested wire protocol command is not available over '
427 _('requested wire protocol command is not available over '
430 'HTTP'))
428 'HTTP'))
431 return
429 return
432
430
433 proto.checkperm(wireprotov1server.commands[cmd].permission)
431 proto.checkperm(wireprotov1server.commands[cmd].permission)
434
432
435 # Possibly handle a modern client wanting to switch protocols.
433 # Possibly handle a modern client wanting to switch protocols.
436 if (cmd == 'capabilities' and
434 if (cmd == 'capabilities' and
437 processcapabilitieshandshake(repo, req, res, proto)):
435 processcapabilitieshandshake(repo, req, res, proto)):
438
436
439 return
437 return
440
438
441 rsp = wireprotov1server.dispatch(repo, proto, cmd)
439 rsp = wireprotov1server.dispatch(repo, proto, cmd)
442
440
443 if isinstance(rsp, bytes):
441 if isinstance(rsp, bytes):
444 setresponse(HTTP_OK, HGTYPE, bodybytes=rsp)
442 setresponse(HTTP_OK, HGTYPE, bodybytes=rsp)
445 elif isinstance(rsp, wireprototypes.bytesresponse):
443 elif isinstance(rsp, wireprototypes.bytesresponse):
446 setresponse(HTTP_OK, HGTYPE, bodybytes=rsp.data)
444 setresponse(HTTP_OK, HGTYPE, bodybytes=rsp.data)
447 elif isinstance(rsp, wireprototypes.streamreslegacy):
445 elif isinstance(rsp, wireprototypes.streamreslegacy):
448 setresponse(HTTP_OK, HGTYPE, bodygen=rsp.gen)
446 setresponse(HTTP_OK, HGTYPE, bodygen=rsp.gen)
449 elif isinstance(rsp, wireprototypes.streamres):
447 elif isinstance(rsp, wireprototypes.streamres):
450 gen = rsp.gen
448 gen = rsp.gen
451
449
452 # This code for compression should not be streamres specific. It
450 # This code for compression should not be streamres specific. It
453 # is here because we only compress streamres at the moment.
451 # is here because we only compress streamres at the moment.
454 mediatype, engine, engineopts = _httpresponsetype(
452 mediatype, engine, engineopts = _httpresponsetype(
455 repo.ui, proto, rsp.prefer_uncompressed)
453 repo.ui, proto, rsp.prefer_uncompressed)
456 gen = engine.compressstream(gen, engineopts)
454 gen = engine.compressstream(gen, engineopts)
457
455
458 if mediatype == HGTYPE2:
456 if mediatype == HGTYPE2:
459 gen = genversion2(gen, engine, engineopts)
457 gen = genversion2(gen, engine, engineopts)
460
458
461 setresponse(HTTP_OK, mediatype, bodygen=gen)
459 setresponse(HTTP_OK, mediatype, bodygen=gen)
462 elif isinstance(rsp, wireprototypes.pushres):
460 elif isinstance(rsp, wireprototypes.pushres):
463 rsp = '%d\n%s' % (rsp.res, rsp.output)
461 rsp = '%d\n%s' % (rsp.res, rsp.output)
464 setresponse(HTTP_OK, HGTYPE, bodybytes=rsp)
462 setresponse(HTTP_OK, HGTYPE, bodybytes=rsp)
465 elif isinstance(rsp, wireprototypes.pusherr):
463 elif isinstance(rsp, wireprototypes.pusherr):
466 rsp = '0\n%s\n' % rsp.res
464 rsp = '0\n%s\n' % rsp.res
467 res.drain = True
465 res.drain = True
468 setresponse(HTTP_OK, HGTYPE, bodybytes=rsp)
466 setresponse(HTTP_OK, HGTYPE, bodybytes=rsp)
469 elif isinstance(rsp, wireprototypes.ooberror):
467 elif isinstance(rsp, wireprototypes.ooberror):
470 setresponse(HTTP_OK, HGERRTYPE, bodybytes=rsp.message)
468 setresponse(HTTP_OK, HGERRTYPE, bodybytes=rsp.message)
471 else:
469 else:
472 raise error.ProgrammingError('hgweb.protocol internal failure', rsp)
470 raise error.ProgrammingError('hgweb.protocol internal failure', rsp)
473
471
474 def _sshv1respondbytes(fout, value):
472 def _sshv1respondbytes(fout, value):
475 """Send a bytes response for protocol version 1."""
473 """Send a bytes response for protocol version 1."""
476 fout.write('%d\n' % len(value))
474 fout.write('%d\n' % len(value))
477 fout.write(value)
475 fout.write(value)
478 fout.flush()
476 fout.flush()
479
477
480 def _sshv1respondstream(fout, source):
478 def _sshv1respondstream(fout, source):
481 write = fout.write
479 write = fout.write
482 for chunk in source.gen:
480 for chunk in source.gen:
483 write(chunk)
481 write(chunk)
484 fout.flush()
482 fout.flush()
485
483
486 def _sshv1respondooberror(fout, ferr, rsp):
484 def _sshv1respondooberror(fout, ferr, rsp):
487 ferr.write(b'%s\n-\n' % rsp)
485 ferr.write(b'%s\n-\n' % rsp)
488 ferr.flush()
486 ferr.flush()
489 fout.write(b'\n')
487 fout.write(b'\n')
490 fout.flush()
488 fout.flush()
491
489
492 @zi.implementer(wireprototypes.baseprotocolhandler)
490 @interfaceutil.implementer(wireprototypes.baseprotocolhandler)
493 class sshv1protocolhandler(object):
491 class sshv1protocolhandler(object):
494 """Handler for requests services via version 1 of SSH protocol."""
492 """Handler for requests services via version 1 of SSH protocol."""
495 def __init__(self, ui, fin, fout):
493 def __init__(self, ui, fin, fout):
496 self._ui = ui
494 self._ui = ui
497 self._fin = fin
495 self._fin = fin
498 self._fout = fout
496 self._fout = fout
499 self._protocaps = set()
497 self._protocaps = set()
500
498
501 @property
499 @property
502 def name(self):
500 def name(self):
503 return wireprototypes.SSHV1
501 return wireprototypes.SSHV1
504
502
505 def getargs(self, args):
503 def getargs(self, args):
506 data = {}
504 data = {}
507 keys = args.split()
505 keys = args.split()
508 for n in xrange(len(keys)):
506 for n in xrange(len(keys)):
509 argline = self._fin.readline()[:-1]
507 argline = self._fin.readline()[:-1]
510 arg, l = argline.split()
508 arg, l = argline.split()
511 if arg not in keys:
509 if arg not in keys:
512 raise error.Abort(_("unexpected parameter %r") % arg)
510 raise error.Abort(_("unexpected parameter %r") % arg)
513 if arg == '*':
511 if arg == '*':
514 star = {}
512 star = {}
515 for k in xrange(int(l)):
513 for k in xrange(int(l)):
516 argline = self._fin.readline()[:-1]
514 argline = self._fin.readline()[:-1]
517 arg, l = argline.split()
515 arg, l = argline.split()
518 val = self._fin.read(int(l))
516 val = self._fin.read(int(l))
519 star[arg] = val
517 star[arg] = val
520 data['*'] = star
518 data['*'] = star
521 else:
519 else:
522 val = self._fin.read(int(l))
520 val = self._fin.read(int(l))
523 data[arg] = val
521 data[arg] = val
524 return [data[k] for k in keys]
522 return [data[k] for k in keys]
525
523
526 def getprotocaps(self):
524 def getprotocaps(self):
527 return self._protocaps
525 return self._protocaps
528
526
529 def getpayload(self):
527 def getpayload(self):
530 # We initially send an empty response. This tells the client it is
528 # We initially send an empty response. This tells the client it is
531 # OK to start sending data. If a client sees any other response, it
529 # OK to start sending data. If a client sees any other response, it
532 # interprets it as an error.
530 # interprets it as an error.
533 _sshv1respondbytes(self._fout, b'')
531 _sshv1respondbytes(self._fout, b'')
534
532
535 # The file is in the form:
533 # The file is in the form:
536 #
534 #
537 # <chunk size>\n<chunk>
535 # <chunk size>\n<chunk>
538 # ...
536 # ...
539 # 0\n
537 # 0\n
540 count = int(self._fin.readline())
538 count = int(self._fin.readline())
541 while count:
539 while count:
542 yield self._fin.read(count)
540 yield self._fin.read(count)
543 count = int(self._fin.readline())
541 count = int(self._fin.readline())
544
542
545 @contextlib.contextmanager
543 @contextlib.contextmanager
546 def mayberedirectstdio(self):
544 def mayberedirectstdio(self):
547 yield None
545 yield None
548
546
549 def client(self):
547 def client(self):
550 client = encoding.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
548 client = encoding.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
551 return 'remote:ssh:' + client
549 return 'remote:ssh:' + client
552
550
553 def addcapabilities(self, repo, caps):
551 def addcapabilities(self, repo, caps):
554 if self.name == wireprototypes.SSHV1:
552 if self.name == wireprototypes.SSHV1:
555 caps.append(b'protocaps')
553 caps.append(b'protocaps')
556 caps.append(b'batch')
554 caps.append(b'batch')
557 return caps
555 return caps
558
556
559 def checkperm(self, perm):
557 def checkperm(self, perm):
560 pass
558 pass
561
559
562 class sshv2protocolhandler(sshv1protocolhandler):
560 class sshv2protocolhandler(sshv1protocolhandler):
563 """Protocol handler for version 2 of the SSH protocol."""
561 """Protocol handler for version 2 of the SSH protocol."""
564
562
565 @property
563 @property
566 def name(self):
564 def name(self):
567 return wireprototypes.SSHV2
565 return wireprototypes.SSHV2
568
566
569 def addcapabilities(self, repo, caps):
567 def addcapabilities(self, repo, caps):
570 return caps
568 return caps
571
569
572 def _runsshserver(ui, repo, fin, fout, ev):
570 def _runsshserver(ui, repo, fin, fout, ev):
573 # This function operates like a state machine of sorts. The following
571 # This function operates like a state machine of sorts. The following
574 # states are defined:
572 # states are defined:
575 #
573 #
576 # protov1-serving
574 # protov1-serving
577 # Server is in protocol version 1 serving mode. Commands arrive on
575 # Server is in protocol version 1 serving mode. Commands arrive on
578 # new lines. These commands are processed in this state, one command
576 # new lines. These commands are processed in this state, one command
579 # after the other.
577 # after the other.
580 #
578 #
581 # protov2-serving
579 # protov2-serving
582 # Server is in protocol version 2 serving mode.
580 # Server is in protocol version 2 serving mode.
583 #
581 #
584 # upgrade-initial
582 # upgrade-initial
585 # The server is going to process an upgrade request.
583 # The server is going to process an upgrade request.
586 #
584 #
587 # upgrade-v2-filter-legacy-handshake
585 # upgrade-v2-filter-legacy-handshake
588 # The protocol is being upgraded to version 2. The server is expecting
586 # The protocol is being upgraded to version 2. The server is expecting
589 # the legacy handshake from version 1.
587 # the legacy handshake from version 1.
590 #
588 #
591 # upgrade-v2-finish
589 # upgrade-v2-finish
592 # The upgrade to version 2 of the protocol is imminent.
590 # The upgrade to version 2 of the protocol is imminent.
593 #
591 #
594 # shutdown
592 # shutdown
595 # The server is shutting down, possibly in reaction to a client event.
593 # The server is shutting down, possibly in reaction to a client event.
596 #
594 #
597 # And here are their transitions:
595 # And here are their transitions:
598 #
596 #
599 # protov1-serving -> shutdown
597 # protov1-serving -> shutdown
600 # When server receives an empty request or encounters another
598 # When server receives an empty request or encounters another
601 # error.
599 # error.
602 #
600 #
603 # protov1-serving -> upgrade-initial
601 # protov1-serving -> upgrade-initial
604 # An upgrade request line was seen.
602 # An upgrade request line was seen.
605 #
603 #
606 # upgrade-initial -> upgrade-v2-filter-legacy-handshake
604 # upgrade-initial -> upgrade-v2-filter-legacy-handshake
607 # Upgrade to version 2 in progress. Server is expecting to
605 # Upgrade to version 2 in progress. Server is expecting to
608 # process a legacy handshake.
606 # process a legacy handshake.
609 #
607 #
610 # upgrade-v2-filter-legacy-handshake -> shutdown
608 # upgrade-v2-filter-legacy-handshake -> shutdown
611 # Client did not fulfill upgrade handshake requirements.
609 # Client did not fulfill upgrade handshake requirements.
612 #
610 #
613 # upgrade-v2-filter-legacy-handshake -> upgrade-v2-finish
611 # upgrade-v2-filter-legacy-handshake -> upgrade-v2-finish
614 # Client fulfilled version 2 upgrade requirements. Finishing that
612 # Client fulfilled version 2 upgrade requirements. Finishing that
615 # upgrade.
613 # upgrade.
616 #
614 #
617 # upgrade-v2-finish -> protov2-serving
615 # upgrade-v2-finish -> protov2-serving
618 # Protocol upgrade to version 2 complete. Server can now speak protocol
616 # Protocol upgrade to version 2 complete. Server can now speak protocol
619 # version 2.
617 # version 2.
620 #
618 #
621 # protov2-serving -> protov1-serving
619 # protov2-serving -> protov1-serving
622 # Ths happens by default since protocol version 2 is the same as
620 # Ths happens by default since protocol version 2 is the same as
623 # version 1 except for the handshake.
621 # version 1 except for the handshake.
624
622
625 state = 'protov1-serving'
623 state = 'protov1-serving'
626 proto = sshv1protocolhandler(ui, fin, fout)
624 proto = sshv1protocolhandler(ui, fin, fout)
627 protoswitched = False
625 protoswitched = False
628
626
629 while not ev.is_set():
627 while not ev.is_set():
630 if state == 'protov1-serving':
628 if state == 'protov1-serving':
631 # Commands are issued on new lines.
629 # Commands are issued on new lines.
632 request = fin.readline()[:-1]
630 request = fin.readline()[:-1]
633
631
634 # Empty lines signal to terminate the connection.
632 # Empty lines signal to terminate the connection.
635 if not request:
633 if not request:
636 state = 'shutdown'
634 state = 'shutdown'
637 continue
635 continue
638
636
639 # It looks like a protocol upgrade request. Transition state to
637 # It looks like a protocol upgrade request. Transition state to
640 # handle it.
638 # handle it.
641 if request.startswith(b'upgrade '):
639 if request.startswith(b'upgrade '):
642 if protoswitched:
640 if protoswitched:
643 _sshv1respondooberror(fout, ui.ferr,
641 _sshv1respondooberror(fout, ui.ferr,
644 b'cannot upgrade protocols multiple '
642 b'cannot upgrade protocols multiple '
645 b'times')
643 b'times')
646 state = 'shutdown'
644 state = 'shutdown'
647 continue
645 continue
648
646
649 state = 'upgrade-initial'
647 state = 'upgrade-initial'
650 continue
648 continue
651
649
652 available = wireprotov1server.commands.commandavailable(
650 available = wireprotov1server.commands.commandavailable(
653 request, proto)
651 request, proto)
654
652
655 # This command isn't available. Send an empty response and go
653 # This command isn't available. Send an empty response and go
656 # back to waiting for a new command.
654 # back to waiting for a new command.
657 if not available:
655 if not available:
658 _sshv1respondbytes(fout, b'')
656 _sshv1respondbytes(fout, b'')
659 continue
657 continue
660
658
661 rsp = wireprotov1server.dispatch(repo, proto, request)
659 rsp = wireprotov1server.dispatch(repo, proto, request)
662
660
663 if isinstance(rsp, bytes):
661 if isinstance(rsp, bytes):
664 _sshv1respondbytes(fout, rsp)
662 _sshv1respondbytes(fout, rsp)
665 elif isinstance(rsp, wireprototypes.bytesresponse):
663 elif isinstance(rsp, wireprototypes.bytesresponse):
666 _sshv1respondbytes(fout, rsp.data)
664 _sshv1respondbytes(fout, rsp.data)
667 elif isinstance(rsp, wireprototypes.streamres):
665 elif isinstance(rsp, wireprototypes.streamres):
668 _sshv1respondstream(fout, rsp)
666 _sshv1respondstream(fout, rsp)
669 elif isinstance(rsp, wireprototypes.streamreslegacy):
667 elif isinstance(rsp, wireprototypes.streamreslegacy):
670 _sshv1respondstream(fout, rsp)
668 _sshv1respondstream(fout, rsp)
671 elif isinstance(rsp, wireprototypes.pushres):
669 elif isinstance(rsp, wireprototypes.pushres):
672 _sshv1respondbytes(fout, b'')
670 _sshv1respondbytes(fout, b'')
673 _sshv1respondbytes(fout, b'%d' % rsp.res)
671 _sshv1respondbytes(fout, b'%d' % rsp.res)
674 elif isinstance(rsp, wireprototypes.pusherr):
672 elif isinstance(rsp, wireprototypes.pusherr):
675 _sshv1respondbytes(fout, rsp.res)
673 _sshv1respondbytes(fout, rsp.res)
676 elif isinstance(rsp, wireprototypes.ooberror):
674 elif isinstance(rsp, wireprototypes.ooberror):
677 _sshv1respondooberror(fout, ui.ferr, rsp.message)
675 _sshv1respondooberror(fout, ui.ferr, rsp.message)
678 else:
676 else:
679 raise error.ProgrammingError('unhandled response type from '
677 raise error.ProgrammingError('unhandled response type from '
680 'wire protocol command: %s' % rsp)
678 'wire protocol command: %s' % rsp)
681
679
682 # For now, protocol version 2 serving just goes back to version 1.
680 # For now, protocol version 2 serving just goes back to version 1.
683 elif state == 'protov2-serving':
681 elif state == 'protov2-serving':
684 state = 'protov1-serving'
682 state = 'protov1-serving'
685 continue
683 continue
686
684
687 elif state == 'upgrade-initial':
685 elif state == 'upgrade-initial':
688 # We should never transition into this state if we've switched
686 # We should never transition into this state if we've switched
689 # protocols.
687 # protocols.
690 assert not protoswitched
688 assert not protoswitched
691 assert proto.name == wireprototypes.SSHV1
689 assert proto.name == wireprototypes.SSHV1
692
690
693 # Expected: upgrade <token> <capabilities>
691 # Expected: upgrade <token> <capabilities>
694 # If we get something else, the request is malformed. It could be
692 # If we get something else, the request is malformed. It could be
695 # from a future client that has altered the upgrade line content.
693 # from a future client that has altered the upgrade line content.
696 # We treat this as an unknown command.
694 # We treat this as an unknown command.
697 try:
695 try:
698 token, caps = request.split(b' ')[1:]
696 token, caps = request.split(b' ')[1:]
699 except ValueError:
697 except ValueError:
700 _sshv1respondbytes(fout, b'')
698 _sshv1respondbytes(fout, b'')
701 state = 'protov1-serving'
699 state = 'protov1-serving'
702 continue
700 continue
703
701
704 # Send empty response if we don't support upgrading protocols.
702 # Send empty response if we don't support upgrading protocols.
705 if not ui.configbool('experimental', 'sshserver.support-v2'):
703 if not ui.configbool('experimental', 'sshserver.support-v2'):
706 _sshv1respondbytes(fout, b'')
704 _sshv1respondbytes(fout, b'')
707 state = 'protov1-serving'
705 state = 'protov1-serving'
708 continue
706 continue
709
707
710 try:
708 try:
711 caps = urlreq.parseqs(caps)
709 caps = urlreq.parseqs(caps)
712 except ValueError:
710 except ValueError:
713 _sshv1respondbytes(fout, b'')
711 _sshv1respondbytes(fout, b'')
714 state = 'protov1-serving'
712 state = 'protov1-serving'
715 continue
713 continue
716
714
717 # We don't see an upgrade request to protocol version 2. Ignore
715 # We don't see an upgrade request to protocol version 2. Ignore
718 # the upgrade request.
716 # the upgrade request.
719 wantedprotos = caps.get(b'proto', [b''])[0]
717 wantedprotos = caps.get(b'proto', [b''])[0]
720 if SSHV2 not in wantedprotos:
718 if SSHV2 not in wantedprotos:
721 _sshv1respondbytes(fout, b'')
719 _sshv1respondbytes(fout, b'')
722 state = 'protov1-serving'
720 state = 'protov1-serving'
723 continue
721 continue
724
722
725 # It looks like we can honor this upgrade request to protocol 2.
723 # It looks like we can honor this upgrade request to protocol 2.
726 # Filter the rest of the handshake protocol request lines.
724 # Filter the rest of the handshake protocol request lines.
727 state = 'upgrade-v2-filter-legacy-handshake'
725 state = 'upgrade-v2-filter-legacy-handshake'
728 continue
726 continue
729
727
730 elif state == 'upgrade-v2-filter-legacy-handshake':
728 elif state == 'upgrade-v2-filter-legacy-handshake':
731 # Client should have sent legacy handshake after an ``upgrade``
729 # Client should have sent legacy handshake after an ``upgrade``
732 # request. Expected lines:
730 # request. Expected lines:
733 #
731 #
734 # hello
732 # hello
735 # between
733 # between
736 # pairs 81
734 # pairs 81
737 # 0000...-0000...
735 # 0000...-0000...
738
736
739 ok = True
737 ok = True
740 for line in (b'hello', b'between', b'pairs 81'):
738 for line in (b'hello', b'between', b'pairs 81'):
741 request = fin.readline()[:-1]
739 request = fin.readline()[:-1]
742
740
743 if request != line:
741 if request != line:
744 _sshv1respondooberror(fout, ui.ferr,
742 _sshv1respondooberror(fout, ui.ferr,
745 b'malformed handshake protocol: '
743 b'malformed handshake protocol: '
746 b'missing %s' % line)
744 b'missing %s' % line)
747 ok = False
745 ok = False
748 state = 'shutdown'
746 state = 'shutdown'
749 break
747 break
750
748
751 if not ok:
749 if not ok:
752 continue
750 continue
753
751
754 request = fin.read(81)
752 request = fin.read(81)
755 if request != b'%s-%s' % (b'0' * 40, b'0' * 40):
753 if request != b'%s-%s' % (b'0' * 40, b'0' * 40):
756 _sshv1respondooberror(fout, ui.ferr,
754 _sshv1respondooberror(fout, ui.ferr,
757 b'malformed handshake protocol: '
755 b'malformed handshake protocol: '
758 b'missing between argument value')
756 b'missing between argument value')
759 state = 'shutdown'
757 state = 'shutdown'
760 continue
758 continue
761
759
762 state = 'upgrade-v2-finish'
760 state = 'upgrade-v2-finish'
763 continue
761 continue
764
762
765 elif state == 'upgrade-v2-finish':
763 elif state == 'upgrade-v2-finish':
766 # Send the upgrade response.
764 # Send the upgrade response.
767 fout.write(b'upgraded %s %s\n' % (token, SSHV2))
765 fout.write(b'upgraded %s %s\n' % (token, SSHV2))
768 servercaps = wireprotov1server.capabilities(repo, proto)
766 servercaps = wireprotov1server.capabilities(repo, proto)
769 rsp = b'capabilities: %s' % servercaps.data
767 rsp = b'capabilities: %s' % servercaps.data
770 fout.write(b'%d\n%s\n' % (len(rsp), rsp))
768 fout.write(b'%d\n%s\n' % (len(rsp), rsp))
771 fout.flush()
769 fout.flush()
772
770
773 proto = sshv2protocolhandler(ui, fin, fout)
771 proto = sshv2protocolhandler(ui, fin, fout)
774 protoswitched = True
772 protoswitched = True
775
773
776 state = 'protov2-serving'
774 state = 'protov2-serving'
777 continue
775 continue
778
776
779 elif state == 'shutdown':
777 elif state == 'shutdown':
780 break
778 break
781
779
782 else:
780 else:
783 raise error.ProgrammingError('unhandled ssh server state: %s' %
781 raise error.ProgrammingError('unhandled ssh server state: %s' %
784 state)
782 state)
785
783
786 class sshserver(object):
784 class sshserver(object):
787 def __init__(self, ui, repo, logfh=None):
785 def __init__(self, ui, repo, logfh=None):
788 self._ui = ui
786 self._ui = ui
789 self._repo = repo
787 self._repo = repo
790 self._fin = ui.fin
788 self._fin = ui.fin
791 self._fout = ui.fout
789 self._fout = ui.fout
792
790
793 # Log write I/O to stdout and stderr if configured.
791 # Log write I/O to stdout and stderr if configured.
794 if logfh:
792 if logfh:
795 self._fout = util.makeloggingfileobject(
793 self._fout = util.makeloggingfileobject(
796 logfh, self._fout, 'o', logdata=True)
794 logfh, self._fout, 'o', logdata=True)
797 ui.ferr = util.makeloggingfileobject(
795 ui.ferr = util.makeloggingfileobject(
798 logfh, ui.ferr, 'e', logdata=True)
796 logfh, ui.ferr, 'e', logdata=True)
799
797
800 hook.redirect(True)
798 hook.redirect(True)
801 ui.fout = repo.ui.fout = ui.ferr
799 ui.fout = repo.ui.fout = ui.ferr
802
800
803 # Prevent insertion/deletion of CRs
801 # Prevent insertion/deletion of CRs
804 procutil.setbinary(self._fin)
802 procutil.setbinary(self._fin)
805 procutil.setbinary(self._fout)
803 procutil.setbinary(self._fout)
806
804
807 def serve_forever(self):
805 def serve_forever(self):
808 self.serveuntil(threading.Event())
806 self.serveuntil(threading.Event())
809 sys.exit(0)
807 sys.exit(0)
810
808
811 def serveuntil(self, ev):
809 def serveuntil(self, ev):
812 """Serve until a threading.Event is set."""
810 """Serve until a threading.Event is set."""
813 _runsshserver(self._ui, self._repo, self._fin, self._fout, ev)
811 _runsshserver(self._ui, self._repo, self._fin, self._fout, ev)
@@ -1,375 +1,375 b''
1 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
1 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 from .node import (
8 from .node import (
9 bin,
9 bin,
10 hex,
10 hex,
11 )
11 )
12 from .thirdparty.zope import (
13 interface as zi,
14 )
15 from .i18n import _
12 from .i18n import _
16 from . import (
13 from . import (
17 error,
14 error,
18 util,
15 util,
19 )
16 )
17 from .utils import (
18 interfaceutil,
19 )
20
20
21 # Names of the SSH protocol implementations.
21 # Names of the SSH protocol implementations.
22 SSHV1 = 'ssh-v1'
22 SSHV1 = 'ssh-v1'
23 # These are advertised over the wire. Increment the counters at the end
23 # These are advertised over the wire. Increment the counters at the end
24 # to reflect BC breakages.
24 # to reflect BC breakages.
25 SSHV2 = 'exp-ssh-v2-0001'
25 SSHV2 = 'exp-ssh-v2-0001'
26 HTTP_WIREPROTO_V2 = 'exp-http-v2-0001'
26 HTTP_WIREPROTO_V2 = 'exp-http-v2-0001'
27
27
28 # All available wire protocol transports.
28 # All available wire protocol transports.
29 TRANSPORTS = {
29 TRANSPORTS = {
30 SSHV1: {
30 SSHV1: {
31 'transport': 'ssh',
31 'transport': 'ssh',
32 'version': 1,
32 'version': 1,
33 },
33 },
34 SSHV2: {
34 SSHV2: {
35 'transport': 'ssh',
35 'transport': 'ssh',
36 # TODO mark as version 2 once all commands are implemented.
36 # TODO mark as version 2 once all commands are implemented.
37 'version': 1,
37 'version': 1,
38 },
38 },
39 'http-v1': {
39 'http-v1': {
40 'transport': 'http',
40 'transport': 'http',
41 'version': 1,
41 'version': 1,
42 },
42 },
43 HTTP_WIREPROTO_V2: {
43 HTTP_WIREPROTO_V2: {
44 'transport': 'http',
44 'transport': 'http',
45 'version': 2,
45 'version': 2,
46 }
46 }
47 }
47 }
48
48
49 class bytesresponse(object):
49 class bytesresponse(object):
50 """A wire protocol response consisting of raw bytes."""
50 """A wire protocol response consisting of raw bytes."""
51 def __init__(self, data):
51 def __init__(self, data):
52 self.data = data
52 self.data = data
53
53
54 class ooberror(object):
54 class ooberror(object):
55 """wireproto reply: failure of a batch of operation
55 """wireproto reply: failure of a batch of operation
56
56
57 Something failed during a batch call. The error message is stored in
57 Something failed during a batch call. The error message is stored in
58 `self.message`.
58 `self.message`.
59 """
59 """
60 def __init__(self, message):
60 def __init__(self, message):
61 self.message = message
61 self.message = message
62
62
63 class pushres(object):
63 class pushres(object):
64 """wireproto reply: success with simple integer return
64 """wireproto reply: success with simple integer return
65
65
66 The call was successful and returned an integer contained in `self.res`.
66 The call was successful and returned an integer contained in `self.res`.
67 """
67 """
68 def __init__(self, res, output):
68 def __init__(self, res, output):
69 self.res = res
69 self.res = res
70 self.output = output
70 self.output = output
71
71
72 class pusherr(object):
72 class pusherr(object):
73 """wireproto reply: failure
73 """wireproto reply: failure
74
74
75 The call failed. The `self.res` attribute contains the error message.
75 The call failed. The `self.res` attribute contains the error message.
76 """
76 """
77 def __init__(self, res, output):
77 def __init__(self, res, output):
78 self.res = res
78 self.res = res
79 self.output = output
79 self.output = output
80
80
81 class streamres(object):
81 class streamres(object):
82 """wireproto reply: binary stream
82 """wireproto reply: binary stream
83
83
84 The call was successful and the result is a stream.
84 The call was successful and the result is a stream.
85
85
86 Accepts a generator containing chunks of data to be sent to the client.
86 Accepts a generator containing chunks of data to be sent to the client.
87
87
88 ``prefer_uncompressed`` indicates that the data is expected to be
88 ``prefer_uncompressed`` indicates that the data is expected to be
89 uncompressable and that the stream should therefore use the ``none``
89 uncompressable and that the stream should therefore use the ``none``
90 engine.
90 engine.
91 """
91 """
92 def __init__(self, gen=None, prefer_uncompressed=False):
92 def __init__(self, gen=None, prefer_uncompressed=False):
93 self.gen = gen
93 self.gen = gen
94 self.prefer_uncompressed = prefer_uncompressed
94 self.prefer_uncompressed = prefer_uncompressed
95
95
96 class streamreslegacy(object):
96 class streamreslegacy(object):
97 """wireproto reply: uncompressed binary stream
97 """wireproto reply: uncompressed binary stream
98
98
99 The call was successful and the result is a stream.
99 The call was successful and the result is a stream.
100
100
101 Accepts a generator containing chunks of data to be sent to the client.
101 Accepts a generator containing chunks of data to be sent to the client.
102
102
103 Like ``streamres``, but sends an uncompressed data for "version 1" clients
103 Like ``streamres``, but sends an uncompressed data for "version 1" clients
104 using the application/mercurial-0.1 media type.
104 using the application/mercurial-0.1 media type.
105 """
105 """
106 def __init__(self, gen=None):
106 def __init__(self, gen=None):
107 self.gen = gen
107 self.gen = gen
108
108
109 class cborresponse(object):
109 class cborresponse(object):
110 """Encode the response value as CBOR."""
110 """Encode the response value as CBOR."""
111 def __init__(self, v):
111 def __init__(self, v):
112 self.value = v
112 self.value = v
113
113
114 class v2errorresponse(object):
114 class v2errorresponse(object):
115 """Represents a command error for version 2 transports."""
115 """Represents a command error for version 2 transports."""
116 def __init__(self, message, args=None):
116 def __init__(self, message, args=None):
117 self.message = message
117 self.message = message
118 self.args = args
118 self.args = args
119
119
class v2streamingresponse(object):
    """A response whose payload is supplied by a generator.

    The generator can either consist of data structures to CBOR
    encode or a stream of already-encoded bytes.
    """
    def __init__(self, gen, compressible=True):
        self.gen = gen
        # Whether the transport may apply wire compression to this payload.
        self.compressible = compressible
129
129
130 # list of nodes encoding / decoding
130 # list of nodes encoding / decoding
def decodelist(l, sep=' '):
    """Decode a ``sep``-delimited string of hex nodes into binary nodes.

    Returns an empty list for a falsy input.
    """
    if not l:
        return []
    return [bin(chunk) for chunk in l.split(sep)]
135
135
def encodelist(l, sep=' '):
    """Encode an iterable of binary nodes as a ``sep``-delimited hex string.

    A TypeError from ``hex`` (non-node element) propagates to the caller.
    The previous ``try/except TypeError: raise`` wrapper re-raised the
    exception unchanged and has been removed as dead code.
    """
    return sep.join(map(hex, l))
141
141
142 # batched call argument encoding
142 # batched call argument encoding
143
143
def escapebatcharg(plain):
    """Escape characters that carry meaning in batch command encoding."""
    # ':' must be escaped first so the escape sequences themselves
    # survive the later replacements.
    res = plain.replace(':', ':c')
    res = res.replace(',', ':o')
    res = res.replace(';', ':s')
    return res.replace('=', ':e')
150
150
151 def unescapebatcharg(escaped):
151 def unescapebatcharg(escaped):
152 return (escaped
152 return (escaped
153 .replace(':e', '=')
153 .replace(':e', '=')
154 .replace(':s', ';')
154 .replace(':s', ';')
155 .replace(':o', ',')
155 .replace(':o', ',')
156 .replace(':c', ':'))
156 .replace(':c', ':'))
157
157
158 # mapping of options accepted by getbundle and their types
158 # mapping of options accepted by getbundle and their types
159 #
159 #
160 # Meant to be extended by extensions. It is extensions responsibility to ensure
160 # Meant to be extended by extensions. It is extensions responsibility to ensure
161 # such options are properly processed in exchange.getbundle.
161 # such options are properly processed in exchange.getbundle.
162 #
162 #
163 # supported types are:
163 # supported types are:
164 #
164 #
165 # :nodes: list of binary nodes
165 # :nodes: list of binary nodes
166 # :csv: list of comma-separated values
166 # :csv: list of comma-separated values
167 # :scsv: list of comma-separated values return as set
167 # :scsv: list of comma-separated values return as set
168 # :plain: string with no transformation needed.
168 # :plain: string with no transformation needed.
# Maps getbundle argument name to its wire decoding type (see the type
# legend in the comment block above). Key order is preserved; do not
# reorder without checking iteration-order consumers.
GETBUNDLE_ARGUMENTS = {
    'heads': 'nodes',
    'bookmarks': 'boolean',
    'common': 'nodes',
    'obsmarkers': 'boolean',
    'phases': 'boolean',
    'bundlecaps': 'scsv',
    'listkeys': 'csv',
    'cg': 'boolean',
    'cbattempted': 'boolean',
    'stream': 'boolean',
}
181
181
class baseprotocolhandler(interfaceutil.Interface):
    """Abstract base class for wire protocol handlers.

    A wire protocol handler serves as an interface between protocol command
    handlers and the wire protocol transport layer. Protocol handlers provide
    methods to read command arguments, redirect stdio for the duration of
    the request, handle response types, etc.

    NOTE: this is an ``interfaceutil.Interface`` declaration — the method
    "bodies" below are documentation-only stubs, not implementations.
    """

    name = interfaceutil.Attribute(
        """The name of the protocol implementation.

        Used for uniquely identifying the transport type.
        """)

    def getargs(args):
        """return the value for arguments in <args>

        For version 1 transports, returns a list of values in the same
        order they appear in ``args``. For version 2 transports, returns
        a dict mapping argument name to value.
        """

    def getprotocaps():
        """Returns the list of protocol-level capabilities of client

        Returns a list of capabilities as declared by the client for
        the current request (or connection for stateful protocol handlers)."""

    def getpayload():
        """Provide a generator for the raw payload.

        The caller is responsible for ensuring that the full payload is
        processed.
        """

    def mayberedirectstdio():
        """Context manager to possibly redirect stdio.

        The context manager yields a file-object like object that receives
        stdout and stderr output when the context manager is active. Or it
        yields ``None`` if no I/O redirection occurs.

        The intent of this context manager is to capture stdio output
        so it may be sent in the response. Some transports support streaming
        stdio to the client in real time. For these transports, stdio output
        won't be captured.
        """

    def client():
        """Returns a string representation of this client (as bytes)."""

    def addcapabilities(repo, caps):
        """Adds advertised capabilities specific to this protocol.

        Receives the list of capabilities collected so far.

        Returns a list of capabilities. The passed in argument can be returned.
        """

    def checkperm(perm):
        """Validate that the client has permissions to perform a request.

        The argument is the permission required to proceed. If the client
        doesn't have that permission, the exception should raise or abort
        in a protocol specific manner.
        """
249
249
class commandentry(object):
    """Represents a declared wire protocol command."""
    def __init__(self, func, args='', transports=None,
                 permission='push'):
        self.func = func
        self.args = args
        self.transports = transports or set()
        self.permission = permission

    def _merge(self, func, args):
        """Merge this instance with an incoming 2-tuple.

        Called when a caller using the old 2-tuple API attempts to
        replace an instance. The incoming values are combined with the
        metadata the 2-tuple cannot express (transports, permission) and
        a fresh instance holding the union is returned.
        """
        return commandentry(func, args=args, transports=set(self.transports),
                            permission=self.permission)

    # Legacy callers treat entries as (func, args) 2-tuples, so expose
    # that sequence-like interface.
    def __iter__(self):
        return iter((self.func, self.args))

    def __getitem__(self, i):
        if i not in (0, 1):
            raise IndexError('can only access elements 0 and 1')
        return self.func if i == 0 else self.args
282
282
class commanddict(dict):
    """Container for registered wire protocol commands.

    Behaves like a dict, but ``__setitem__`` is overridden to silently
    coerce legacy 2-tuple values into ``commandentry`` instances for API
    compatibility.
    """
    def __setitem__(self, k, v):
        if not isinstance(v, (commandentry, tuple)):
            raise ValueError('command entries must be commandentry instances '
                             'or 2-tuples')

        if isinstance(v, tuple):
            if len(v) != 2:
                raise ValueError('command tuples must have exactly 2 elements')

            # It is common for extensions to wrap wire protocol commands via
            # e.g. ``wireproto.commands[x] = (newfn, args)``. Because callers
            # doing this aren't aware of the new API that uses objects to
            # store command entries, we automatically merge old state with
            # new.
            if k in self:
                v = self[k]._merge(v[0], v[1])
            else:
                # Use default values from @wireprotocommand.
                v = commandentry(v[0], args=v[1],
                                 transports=set(TRANSPORTS),
                                 permission='push')

        return super(commanddict, self).__setitem__(k, v)

    def commandavailable(self, command, proto):
        """Determine if a command is available for the requested protocol."""
        assert proto.name in TRANSPORTS

        entry = self.get(command)

        return bool(entry) and proto.name in entry.transports
327
327
def supportedcompengines(ui, role):
    """Obtain the list of supported compression engines for a request.

    ``role`` is either ``util.CLIENTROLE`` or ``util.SERVERROLE`` and
    selects which config knob and wire-protocol priority attribute apply.

    Raises ``error.Abort`` if the config names an unknown engine or
    filters out every usable engine.
    """
    assert role in (util.CLIENTROLE, util.SERVERROLE)

    compengines = util.compengines.supportedwireengines(role)

    # Allow config to override default list and ordering.
    if role == util.SERVERROLE:
        configengines = ui.configlist('server', 'compressionengines')
        config = 'server.compressionengines'
    else:
        # This is currently implemented mainly to facilitate testing. In most
        # cases, the server should be in charge of choosing a compression engine
        # because a server has the most to lose from a sub-optimal choice. (e.g.
        # CPU DoS due to an expensive engine or a network DoS due to poor
        # compression ratio).
        configengines = ui.configlist('experimental',
                                      'clientcompressionengines')
        config = 'experimental.clientcompressionengines'

    # No explicit config. Filter out the ones that aren't supposed to be
    # advertised and return default ordering.
    if not configengines:
        attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
        return [e for e in compengines
                if getattr(e.wireprotosupport(), attr) > 0]

    # If compression engines are listed in the config, assume there is a good
    # reason for it (like server operators wanting to achieve specific
    # performance characteristics). So fail fast if the config references
    # unusable compression engines.
    validnames = set(e.name() for e in compengines)
    invalidnames = set(e for e in configengines if e not in validnames)
    if invalidnames:
        raise error.Abort(_('invalid compression engine defined in %s: %s') %
                          (config, ', '.join(sorted(invalidnames))))

    compengines = [e for e in compengines if e.name() in configengines]
    compengines = sorted(compengines,
                         key=lambda e: configengines.index(e.name()))

    if not compengines:
        # BUG FIX: this previously read ``', '.sorted(validnames)`` which
        # raised AttributeError (str has no ``sorted`` method) whenever this
        # error path was hit, masking the real configuration problem.
        raise error.Abort(_('%s config option does not specify any known '
                            'compression engines') % config,
                          hint=_('usable compression engines: %s') %
                          ', '.join(sorted(validnames)))

    return compengines
@@ -1,619 +1,620 b''
1 # wireprotov1peer.py - Client-side functionality for wire protocol version 1.
1 # wireprotov1peer.py - Client-side functionality for wire protocol version 1.
2 #
2 #
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11 import sys
11 import sys
12 import weakref
12 import weakref
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 bin,
16 bin,
17 )
17 )
18 from .thirdparty.zope import (
19 interface as zi,
20 )
21 from . import (
18 from . import (
22 bundle2,
19 bundle2,
23 changegroup as changegroupmod,
20 changegroup as changegroupmod,
24 encoding,
21 encoding,
25 error,
22 error,
26 pushkey as pushkeymod,
23 pushkey as pushkeymod,
27 pycompat,
24 pycompat,
28 repository,
25 repository,
29 util,
26 util,
30 wireprototypes,
27 wireprototypes,
31 )
28 )
29 from .utils import (
30 interfaceutil,
31 )
32
32
33 urlreq = util.urlreq
33 urlreq = util.urlreq
34
34
def batchable(f):
    '''annotation for batchable methods

    Such methods must implement a coroutine as follows:

    @batchable
    def sample(self, one, two=None):
        # Build list of encoded arguments suitable for your wire protocol:
        encargs = [('one', encode(one),), ('two', encode(two),)]
        # Create future for injection of encoded result:
        encresref = future()
        # Return encoded arguments and future:
        yield encargs, encresref
        # Assuming the future to be filled with the result from the batched
        # request now. Decode it:
        yield decode(encresref.value)

    The decorator returns a function which wraps this coroutine as a plain
    method, but adds the original method as an attribute called "batchable",
    which is used by remotebatch to split the call into separate encoding and
    decoding phases.
    '''
    def plain(*args, **opts):
        coro = f(*args, **opts)
        encoded, resref = next(coro)
        if not resref:
            # The coroutine produced a local result; no wire call needed.
            return encoded
        peer = args[0]
        cmd = pycompat.bytesurl(f.__name__)  # ensure cmd is ascii bytestr
        resref.set(peer._submitone(cmd, encoded))
        return next(coro)

    plain.batchable = f
    return plain
68
68
class future(object):
    '''placeholder for a value to be set later'''
    def set(self, value):
        # A future may be resolved at most once; a second set() is a
        # programming error surfaced as RepoError.
        if util.safehasattr(self, 'value'):
            raise error.RepoError("future is already set")
        self.value = value
75
75
def encodebatchcmds(req):
    """Return a ``cmds`` argument value for the ``batch`` command."""
    escape = wireprototypes.escapebatcharg

    chunks = []
    for op, argsdict in req:
        # Old servers didn't properly unescape argument names. So prevent
        # the sending of argument names that may not be decoded properly by
        # servers.
        assert all(escape(k) == k for k in argsdict)

        encoded = ','.join('%s=%s' % (escape(k), escape(v))
                           for k, v in argsdict.iteritems())
        chunks.append('%s %s' % (op, encoded))

    return ';'.join(chunks)
92
92
class unsentfuture(pycompat.futures.Future):
    """A Future variation to represent an unsent command.

    Because we buffer commands and don't submit them immediately, calling
    ``result()`` on an unsent future could deadlock. Futures for buffered
    commands are represented by this type, which wraps ``result()`` to
    call ``sendcommands()``.

    ``_peerexecutor`` is attached by ``peerexecutor.callcommand()`` when
    the future's class is swapped to this type.
    """

    def result(self, timeout=None):
        if self.done():
            return pycompat.futures.Future.result(self, timeout)

        self._peerexecutor.sendcommands()

        # This looks like it will infinitely recurse. However,
        # sendcommands() should modify __class__. This call serves as a check
        # on that.
        return self.result(timeout)
112
112
113 @zi.implementer(repository.ipeercommandexecutor)
113 @interfaceutil.implementer(repository.ipeercommandexecutor)
114 class peerexecutor(object):
114 class peerexecutor(object):
    def __init__(self, peer):
        # Peer whose methods are invoked to dispatch commands.
        self._peer = peer
        # Set once commands have been transmitted; no further callcommand().
        self._sent = False
        self._closed = False
        # Buffered (command, args, fn, future) tuples awaiting submission.
        self._calls = []
        # Weak refs so abandoned futures can be garbage collected.
        self._futures = weakref.WeakSet()
        # Thread pool + future used to read batch responses asynchronously.
        self._responseexecutor = None
        self._responsef = None
123
123
    def __enter__(self):
        # Context manager entry: the executor itself is the managed object.
        return self
126
126
    def __exit__(self, exctype, excvalee, exctb):
        # Flush buffered commands and wait for in-flight responses.
        self.close()
129
129
    def callcommand(self, command, args):
        """Buffer or immediately dispatch a wire protocol command.

        Returns a future resolving to the command's result. Batchable
        commands are queued until ``sendcommands()``; a non-batchable
        command is sent at once and no further commands may follow it.

        Raises ``error.ProgrammingError`` on misuse (after send/close,
        unknown command, or mixing a non-batchable command with others).
        """
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used '
                                         'after commands are sent')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used '
                                         'after close()')

        # Commands are dispatched through methods on the peer.
        fn = getattr(self._peer, pycompat.sysstr(command), None)

        if not fn:
            raise error.ProgrammingError(
                'cannot call command %s: method of same name not available '
                'on peer' % command)

        # Commands are either batchable or they aren't. If a command
        # isn't batchable, we send it immediately because the executor
        # can no longer accept new commands after a non-batchable command.
        # If a command is batchable, we queue it for later. But we have
        # to account for the case of a non-batchable command arriving after
        # a batchable one and refuse to service it.

        def addcall():
            # Register a future and the buffered call that resolves it.
            f = pycompat.futures.Future()
            self._futures.add(f)
            self._calls.append((command, args, fn, f))
            return f

        if getattr(fn, 'batchable', False):
            f = addcall()

            # But since we don't issue it immediately, we wrap its result()
            # to trigger sending so we avoid deadlocks.
            f.__class__ = unsentfuture
            f._peerexecutor = self
        else:
            if self._calls:
                raise error.ProgrammingError(
                    '%s is not batchable and cannot be called on a command '
                    'executor along with other commands' % command)

            f = addcall()

            # Non-batchable commands can never coexist with another command
            # in this executor. So send the command immediately.
            self.sendcommands()

        return f
180
180
    def sendcommands(self):
        """Transmit all buffered commands to the peer.

        Idempotent: returns immediately if commands were already sent or
        nothing is buffered. A single buffered command is executed
        synchronously; multiple commands are batched and their responses
        read on a background thread so futures can resolve as data
        streams in.
        """
        if self._sent:
            return

        if not self._calls:
            return

        self._sent = True

        # Unhack any future types so caller seens a clean type and to break
        # cycle between us and futures.
        for f in self._futures:
            if isinstance(f, unsentfuture):
                f.__class__ = pycompat.futures.Future
                f._peerexecutor = None

        calls = self._calls
        # Mainly to destroy references to futures.
        self._calls = None

        # Simple case of a single command. We call it synchronously.
        if len(calls) == 1:
            command, args, fn, f = calls[0]

            # Future was cancelled. Ignore it.
            if not f.set_running_or_notify_cancel():
                return

            try:
                result = fn(**pycompat.strkwargs(args))
            except Exception:
                pycompat.future_set_exception_info(f, sys.exc_info()[1:])
            else:
                f.set_result(result)

            return

        # Batch commands are a bit harder. First, we have to deal with the
        # @batchable coroutine. That's a bit annoying. Furthermore, we also
        # need to preserve streaming. i.e. it should be possible for the
        # futures to resolve as data is coming in off the wire without having
        # to wait for the final byte of the final response. We do this by
        # spinning up a thread to read the responses.

        requests = []
        states = []

        for command, args, fn, f in calls:
            # Future was cancelled. Ignore it.
            if not f.set_running_or_notify_cancel():
                continue

            try:
                batchable = fn.batchable(fn.__self__,
                                         **pycompat.strkwargs(args))
            except Exception:
                pycompat.future_set_exception_info(f, sys.exc_info()[1:])
                return

            # Encoded arguments and future holding remote result.
            try:
                encodedargs, fremote = next(batchable)
            except Exception:
                pycompat.future_set_exception_info(f, sys.exc_info()[1:])
                return

            requests.append((command, encodedargs))
            states.append((command, f, batchable, fremote))

        if not requests:
            return

        # This will emit responses in order they were executed.
        wireresults = self._peer._submitbatch(requests)

        # The use of a thread pool executor here is a bit weird for something
        # that only spins up a single thread. However, thread management is
        # hard and it is easy to encounter race conditions, deadlocks, etc.
        # concurrent.futures already solves these problems and its thread pool
        # executor has minimal overhead. So we use it.
        self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
        self._responsef = self._responseexecutor.submit(self._readbatchresponse,
                                                        states, wireresults)
264
264
265 def close(self):
265 def close(self):
266 self.sendcommands()
266 self.sendcommands()
267
267
268 if self._closed:
268 if self._closed:
269 return
269 return
270
270
271 self._closed = True
271 self._closed = True
272
272
273 if not self._responsef:
273 if not self._responsef:
274 return
274 return
275
275
276 # We need to wait on our in-flight response and then shut down the
276 # We need to wait on our in-flight response and then shut down the
277 # executor once we have a result.
277 # executor once we have a result.
278 try:
278 try:
279 self._responsef.result()
279 self._responsef.result()
280 finally:
280 finally:
281 self._responseexecutor.shutdown(wait=True)
281 self._responseexecutor.shutdown(wait=True)
282 self._responsef = None
282 self._responsef = None
283 self._responseexecutor = None
283 self._responseexecutor = None
284
284
285 # If any of our futures are still in progress, mark them as
285 # If any of our futures are still in progress, mark them as
286 # errored. Otherwise a result() could wait indefinitely.
286 # errored. Otherwise a result() could wait indefinitely.
287 for f in self._futures:
287 for f in self._futures:
288 if not f.done():
288 if not f.done():
289 f.set_exception(error.ResponseError(
289 f.set_exception(error.ResponseError(
290 _('unfulfilled batch command response')))
290 _('unfulfilled batch command response')))
291
291
292 self._futures = None
292 self._futures = None
293
293
294 def _readbatchresponse(self, states, wireresults):
294 def _readbatchresponse(self, states, wireresults):
295 # Executes in a thread to read data off the wire.
295 # Executes in a thread to read data off the wire.
296
296
297 for command, f, batchable, fremote in states:
297 for command, f, batchable, fremote in states:
298 # Grab raw result off the wire and teach the internal future
298 # Grab raw result off the wire and teach the internal future
299 # about it.
299 # about it.
300 remoteresult = next(wireresults)
300 remoteresult = next(wireresults)
301 fremote.set(remoteresult)
301 fremote.set(remoteresult)
302
302
303 # And ask the coroutine to decode that value.
303 # And ask the coroutine to decode that value.
304 try:
304 try:
305 result = next(batchable)
305 result = next(batchable)
306 except Exception:
306 except Exception:
307 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
307 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
308 else:
308 else:
309 f.set_result(result)
309 f.set_result(result)
310
310
311 @zi.implementer(repository.ipeercommands, repository.ipeerlegacycommands)
311 @interfaceutil.implementer(repository.ipeercommands,
312 repository.ipeerlegacycommands)
312 class wirepeer(repository.peer):
313 class wirepeer(repository.peer):
313 """Client-side interface for communicating with a peer repository.
314 """Client-side interface for communicating with a peer repository.
314
315
315 Methods commonly call wire protocol commands of the same name.
316 Methods commonly call wire protocol commands of the same name.
316
317
317 See also httppeer.py and sshpeer.py for protocol-specific
318 See also httppeer.py and sshpeer.py for protocol-specific
318 implementations of this interface.
319 implementations of this interface.
319 """
320 """
320 def commandexecutor(self):
321 def commandexecutor(self):
321 return peerexecutor(self)
322 return peerexecutor(self)
322
323
323 # Begin of ipeercommands interface.
324 # Begin of ipeercommands interface.
324
325
325 def clonebundles(self):
326 def clonebundles(self):
326 self.requirecap('clonebundles', _('clone bundles'))
327 self.requirecap('clonebundles', _('clone bundles'))
327 return self._call('clonebundles')
328 return self._call('clonebundles')
328
329
329 @batchable
330 @batchable
330 def lookup(self, key):
331 def lookup(self, key):
331 self.requirecap('lookup', _('look up remote revision'))
332 self.requirecap('lookup', _('look up remote revision'))
332 f = future()
333 f = future()
333 yield {'key': encoding.fromlocal(key)}, f
334 yield {'key': encoding.fromlocal(key)}, f
334 d = f.value
335 d = f.value
335 success, data = d[:-1].split(" ", 1)
336 success, data = d[:-1].split(" ", 1)
336 if int(success):
337 if int(success):
337 yield bin(data)
338 yield bin(data)
338 else:
339 else:
339 self._abort(error.RepoError(data))
340 self._abort(error.RepoError(data))
340
341
341 @batchable
342 @batchable
342 def heads(self):
343 def heads(self):
343 f = future()
344 f = future()
344 yield {}, f
345 yield {}, f
345 d = f.value
346 d = f.value
346 try:
347 try:
347 yield wireprototypes.decodelist(d[:-1])
348 yield wireprototypes.decodelist(d[:-1])
348 except ValueError:
349 except ValueError:
349 self._abort(error.ResponseError(_("unexpected response:"), d))
350 self._abort(error.ResponseError(_("unexpected response:"), d))
350
351
351 @batchable
352 @batchable
352 def known(self, nodes):
353 def known(self, nodes):
353 f = future()
354 f = future()
354 yield {'nodes': wireprototypes.encodelist(nodes)}, f
355 yield {'nodes': wireprototypes.encodelist(nodes)}, f
355 d = f.value
356 d = f.value
356 try:
357 try:
357 yield [bool(int(b)) for b in d]
358 yield [bool(int(b)) for b in d]
358 except ValueError:
359 except ValueError:
359 self._abort(error.ResponseError(_("unexpected response:"), d))
360 self._abort(error.ResponseError(_("unexpected response:"), d))
360
361
361 @batchable
362 @batchable
362 def branchmap(self):
363 def branchmap(self):
363 f = future()
364 f = future()
364 yield {}, f
365 yield {}, f
365 d = f.value
366 d = f.value
366 try:
367 try:
367 branchmap = {}
368 branchmap = {}
368 for branchpart in d.splitlines():
369 for branchpart in d.splitlines():
369 branchname, branchheads = branchpart.split(' ', 1)
370 branchname, branchheads = branchpart.split(' ', 1)
370 branchname = encoding.tolocal(urlreq.unquote(branchname))
371 branchname = encoding.tolocal(urlreq.unquote(branchname))
371 branchheads = wireprototypes.decodelist(branchheads)
372 branchheads = wireprototypes.decodelist(branchheads)
372 branchmap[branchname] = branchheads
373 branchmap[branchname] = branchheads
373 yield branchmap
374 yield branchmap
374 except TypeError:
375 except TypeError:
375 self._abort(error.ResponseError(_("unexpected response:"), d))
376 self._abort(error.ResponseError(_("unexpected response:"), d))
376
377
377 @batchable
378 @batchable
378 def listkeys(self, namespace):
379 def listkeys(self, namespace):
379 if not self.capable('pushkey'):
380 if not self.capable('pushkey'):
380 yield {}, None
381 yield {}, None
381 f = future()
382 f = future()
382 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
383 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
383 yield {'namespace': encoding.fromlocal(namespace)}, f
384 yield {'namespace': encoding.fromlocal(namespace)}, f
384 d = f.value
385 d = f.value
385 self.ui.debug('received listkey for "%s": %i bytes\n'
386 self.ui.debug('received listkey for "%s": %i bytes\n'
386 % (namespace, len(d)))
387 % (namespace, len(d)))
387 yield pushkeymod.decodekeys(d)
388 yield pushkeymod.decodekeys(d)
388
389
389 @batchable
390 @batchable
390 def pushkey(self, namespace, key, old, new):
391 def pushkey(self, namespace, key, old, new):
391 if not self.capable('pushkey'):
392 if not self.capable('pushkey'):
392 yield False, None
393 yield False, None
393 f = future()
394 f = future()
394 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
395 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
395 yield {'namespace': encoding.fromlocal(namespace),
396 yield {'namespace': encoding.fromlocal(namespace),
396 'key': encoding.fromlocal(key),
397 'key': encoding.fromlocal(key),
397 'old': encoding.fromlocal(old),
398 'old': encoding.fromlocal(old),
398 'new': encoding.fromlocal(new)}, f
399 'new': encoding.fromlocal(new)}, f
399 d = f.value
400 d = f.value
400 d, output = d.split('\n', 1)
401 d, output = d.split('\n', 1)
401 try:
402 try:
402 d = bool(int(d))
403 d = bool(int(d))
403 except ValueError:
404 except ValueError:
404 raise error.ResponseError(
405 raise error.ResponseError(
405 _('push failed (unexpected response):'), d)
406 _('push failed (unexpected response):'), d)
406 for l in output.splitlines(True):
407 for l in output.splitlines(True):
407 self.ui.status(_('remote: '), l)
408 self.ui.status(_('remote: '), l)
408 yield d
409 yield d
409
410
410 def stream_out(self):
411 def stream_out(self):
411 return self._callstream('stream_out')
412 return self._callstream('stream_out')
412
413
413 def getbundle(self, source, **kwargs):
414 def getbundle(self, source, **kwargs):
414 kwargs = pycompat.byteskwargs(kwargs)
415 kwargs = pycompat.byteskwargs(kwargs)
415 self.requirecap('getbundle', _('look up remote changes'))
416 self.requirecap('getbundle', _('look up remote changes'))
416 opts = {}
417 opts = {}
417 bundlecaps = kwargs.get('bundlecaps') or set()
418 bundlecaps = kwargs.get('bundlecaps') or set()
418 for key, value in kwargs.iteritems():
419 for key, value in kwargs.iteritems():
419 if value is None:
420 if value is None:
420 continue
421 continue
421 keytype = wireprototypes.GETBUNDLE_ARGUMENTS.get(key)
422 keytype = wireprototypes.GETBUNDLE_ARGUMENTS.get(key)
422 if keytype is None:
423 if keytype is None:
423 raise error.ProgrammingError(
424 raise error.ProgrammingError(
424 'Unexpectedly None keytype for key %s' % key)
425 'Unexpectedly None keytype for key %s' % key)
425 elif keytype == 'nodes':
426 elif keytype == 'nodes':
426 value = wireprototypes.encodelist(value)
427 value = wireprototypes.encodelist(value)
427 elif keytype == 'csv':
428 elif keytype == 'csv':
428 value = ','.join(value)
429 value = ','.join(value)
429 elif keytype == 'scsv':
430 elif keytype == 'scsv':
430 value = ','.join(sorted(value))
431 value = ','.join(sorted(value))
431 elif keytype == 'boolean':
432 elif keytype == 'boolean':
432 value = '%i' % bool(value)
433 value = '%i' % bool(value)
433 elif keytype != 'plain':
434 elif keytype != 'plain':
434 raise KeyError('unknown getbundle option type %s'
435 raise KeyError('unknown getbundle option type %s'
435 % keytype)
436 % keytype)
436 opts[key] = value
437 opts[key] = value
437 f = self._callcompressable("getbundle", **pycompat.strkwargs(opts))
438 f = self._callcompressable("getbundle", **pycompat.strkwargs(opts))
438 if any((cap.startswith('HG2') for cap in bundlecaps)):
439 if any((cap.startswith('HG2') for cap in bundlecaps)):
439 return bundle2.getunbundler(self.ui, f)
440 return bundle2.getunbundler(self.ui, f)
440 else:
441 else:
441 return changegroupmod.cg1unpacker(f, 'UN')
442 return changegroupmod.cg1unpacker(f, 'UN')
442
443
443 def unbundle(self, bundle, heads, url):
444 def unbundle(self, bundle, heads, url):
444 '''Send cg (a readable file-like object representing the
445 '''Send cg (a readable file-like object representing the
445 changegroup to push, typically a chunkbuffer object) to the
446 changegroup to push, typically a chunkbuffer object) to the
446 remote server as a bundle.
447 remote server as a bundle.
447
448
448 When pushing a bundle10 stream, return an integer indicating the
449 When pushing a bundle10 stream, return an integer indicating the
449 result of the push (see changegroup.apply()).
450 result of the push (see changegroup.apply()).
450
451
451 When pushing a bundle20 stream, return a bundle20 stream.
452 When pushing a bundle20 stream, return a bundle20 stream.
452
453
453 `url` is the url the client thinks it's pushing to, which is
454 `url` is the url the client thinks it's pushing to, which is
454 visible to hooks.
455 visible to hooks.
455 '''
456 '''
456
457
457 if heads != ['force'] and self.capable('unbundlehash'):
458 if heads != ['force'] and self.capable('unbundlehash'):
458 heads = wireprototypes.encodelist(
459 heads = wireprototypes.encodelist(
459 ['hashed', hashlib.sha1(''.join(sorted(heads))).digest()])
460 ['hashed', hashlib.sha1(''.join(sorted(heads))).digest()])
460 else:
461 else:
461 heads = wireprototypes.encodelist(heads)
462 heads = wireprototypes.encodelist(heads)
462
463
463 if util.safehasattr(bundle, 'deltaheader'):
464 if util.safehasattr(bundle, 'deltaheader'):
464 # this a bundle10, do the old style call sequence
465 # this a bundle10, do the old style call sequence
465 ret, output = self._callpush("unbundle", bundle, heads=heads)
466 ret, output = self._callpush("unbundle", bundle, heads=heads)
466 if ret == "":
467 if ret == "":
467 raise error.ResponseError(
468 raise error.ResponseError(
468 _('push failed:'), output)
469 _('push failed:'), output)
469 try:
470 try:
470 ret = int(ret)
471 ret = int(ret)
471 except ValueError:
472 except ValueError:
472 raise error.ResponseError(
473 raise error.ResponseError(
473 _('push failed (unexpected response):'), ret)
474 _('push failed (unexpected response):'), ret)
474
475
475 for l in output.splitlines(True):
476 for l in output.splitlines(True):
476 self.ui.status(_('remote: '), l)
477 self.ui.status(_('remote: '), l)
477 else:
478 else:
478 # bundle2 push. Send a stream, fetch a stream.
479 # bundle2 push. Send a stream, fetch a stream.
479 stream = self._calltwowaystream('unbundle', bundle, heads=heads)
480 stream = self._calltwowaystream('unbundle', bundle, heads=heads)
480 ret = bundle2.getunbundler(self.ui, stream)
481 ret = bundle2.getunbundler(self.ui, stream)
481 return ret
482 return ret
482
483
483 # End of ipeercommands interface.
484 # End of ipeercommands interface.
484
485
485 # Begin of ipeerlegacycommands interface.
486 # Begin of ipeerlegacycommands interface.
486
487
487 def branches(self, nodes):
488 def branches(self, nodes):
488 n = wireprototypes.encodelist(nodes)
489 n = wireprototypes.encodelist(nodes)
489 d = self._call("branches", nodes=n)
490 d = self._call("branches", nodes=n)
490 try:
491 try:
491 br = [tuple(wireprototypes.decodelist(b)) for b in d.splitlines()]
492 br = [tuple(wireprototypes.decodelist(b)) for b in d.splitlines()]
492 return br
493 return br
493 except ValueError:
494 except ValueError:
494 self._abort(error.ResponseError(_("unexpected response:"), d))
495 self._abort(error.ResponseError(_("unexpected response:"), d))
495
496
496 def between(self, pairs):
497 def between(self, pairs):
497 batch = 8 # avoid giant requests
498 batch = 8 # avoid giant requests
498 r = []
499 r = []
499 for i in xrange(0, len(pairs), batch):
500 for i in xrange(0, len(pairs), batch):
500 n = " ".join([wireprototypes.encodelist(p, '-')
501 n = " ".join([wireprototypes.encodelist(p, '-')
501 for p in pairs[i:i + batch]])
502 for p in pairs[i:i + batch]])
502 d = self._call("between", pairs=n)
503 d = self._call("between", pairs=n)
503 try:
504 try:
504 r.extend(l and wireprototypes.decodelist(l) or []
505 r.extend(l and wireprototypes.decodelist(l) or []
505 for l in d.splitlines())
506 for l in d.splitlines())
506 except ValueError:
507 except ValueError:
507 self._abort(error.ResponseError(_("unexpected response:"), d))
508 self._abort(error.ResponseError(_("unexpected response:"), d))
508 return r
509 return r
509
510
510 def changegroup(self, nodes, source):
511 def changegroup(self, nodes, source):
511 n = wireprototypes.encodelist(nodes)
512 n = wireprototypes.encodelist(nodes)
512 f = self._callcompressable("changegroup", roots=n)
513 f = self._callcompressable("changegroup", roots=n)
513 return changegroupmod.cg1unpacker(f, 'UN')
514 return changegroupmod.cg1unpacker(f, 'UN')
514
515
515 def changegroupsubset(self, bases, heads, source):
516 def changegroupsubset(self, bases, heads, source):
516 self.requirecap('changegroupsubset', _('look up remote changes'))
517 self.requirecap('changegroupsubset', _('look up remote changes'))
517 bases = wireprototypes.encodelist(bases)
518 bases = wireprototypes.encodelist(bases)
518 heads = wireprototypes.encodelist(heads)
519 heads = wireprototypes.encodelist(heads)
519 f = self._callcompressable("changegroupsubset",
520 f = self._callcompressable("changegroupsubset",
520 bases=bases, heads=heads)
521 bases=bases, heads=heads)
521 return changegroupmod.cg1unpacker(f, 'UN')
522 return changegroupmod.cg1unpacker(f, 'UN')
522
523
523 # End of ipeerlegacycommands interface.
524 # End of ipeerlegacycommands interface.
524
525
525 def _submitbatch(self, req):
526 def _submitbatch(self, req):
526 """run batch request <req> on the server
527 """run batch request <req> on the server
527
528
528 Returns an iterator of the raw responses from the server.
529 Returns an iterator of the raw responses from the server.
529 """
530 """
530 ui = self.ui
531 ui = self.ui
531 if ui.debugflag and ui.configbool('devel', 'debug.peer-request'):
532 if ui.debugflag and ui.configbool('devel', 'debug.peer-request'):
532 ui.debug('devel-peer-request: batched-content\n')
533 ui.debug('devel-peer-request: batched-content\n')
533 for op, args in req:
534 for op, args in req:
534 msg = 'devel-peer-request: - %s (%d arguments)\n'
535 msg = 'devel-peer-request: - %s (%d arguments)\n'
535 ui.debug(msg % (op, len(args)))
536 ui.debug(msg % (op, len(args)))
536
537
537 unescapearg = wireprototypes.unescapebatcharg
538 unescapearg = wireprototypes.unescapebatcharg
538
539
539 rsp = self._callstream("batch", cmds=encodebatchcmds(req))
540 rsp = self._callstream("batch", cmds=encodebatchcmds(req))
540 chunk = rsp.read(1024)
541 chunk = rsp.read(1024)
541 work = [chunk]
542 work = [chunk]
542 while chunk:
543 while chunk:
543 while ';' not in chunk and chunk:
544 while ';' not in chunk and chunk:
544 chunk = rsp.read(1024)
545 chunk = rsp.read(1024)
545 work.append(chunk)
546 work.append(chunk)
546 merged = ''.join(work)
547 merged = ''.join(work)
547 while ';' in merged:
548 while ';' in merged:
548 one, merged = merged.split(';', 1)
549 one, merged = merged.split(';', 1)
549 yield unescapearg(one)
550 yield unescapearg(one)
550 chunk = rsp.read(1024)
551 chunk = rsp.read(1024)
551 work = [merged, chunk]
552 work = [merged, chunk]
552 yield unescapearg(''.join(work))
553 yield unescapearg(''.join(work))
553
554
554 def _submitone(self, op, args):
555 def _submitone(self, op, args):
555 return self._call(op, **pycompat.strkwargs(args))
556 return self._call(op, **pycompat.strkwargs(args))
556
557
557 def debugwireargs(self, one, two, three=None, four=None, five=None):
558 def debugwireargs(self, one, two, three=None, four=None, five=None):
558 # don't pass optional arguments left at their default value
559 # don't pass optional arguments left at their default value
559 opts = {}
560 opts = {}
560 if three is not None:
561 if three is not None:
561 opts[r'three'] = three
562 opts[r'three'] = three
562 if four is not None:
563 if four is not None:
563 opts[r'four'] = four
564 opts[r'four'] = four
564 return self._call('debugwireargs', one=one, two=two, **opts)
565 return self._call('debugwireargs', one=one, two=two, **opts)
565
566
566 def _call(self, cmd, **args):
567 def _call(self, cmd, **args):
567 """execute <cmd> on the server
568 """execute <cmd> on the server
568
569
569 The command is expected to return a simple string.
570 The command is expected to return a simple string.
570
571
571 returns the server reply as a string."""
572 returns the server reply as a string."""
572 raise NotImplementedError()
573 raise NotImplementedError()
573
574
574 def _callstream(self, cmd, **args):
575 def _callstream(self, cmd, **args):
575 """execute <cmd> on the server
576 """execute <cmd> on the server
576
577
577 The command is expected to return a stream. Note that if the
578 The command is expected to return a stream. Note that if the
578 command doesn't return a stream, _callstream behaves
579 command doesn't return a stream, _callstream behaves
579 differently for ssh and http peers.
580 differently for ssh and http peers.
580
581
581 returns the server reply as a file like object.
582 returns the server reply as a file like object.
582 """
583 """
583 raise NotImplementedError()
584 raise NotImplementedError()
584
585
585 def _callcompressable(self, cmd, **args):
586 def _callcompressable(self, cmd, **args):
586 """execute <cmd> on the server
587 """execute <cmd> on the server
587
588
588 The command is expected to return a stream.
589 The command is expected to return a stream.
589
590
590 The stream may have been compressed in some implementations. This
591 The stream may have been compressed in some implementations. This
591 function takes care of the decompression. This is the only difference
592 function takes care of the decompression. This is the only difference
592 with _callstream.
593 with _callstream.
593
594
594 returns the server reply as a file like object.
595 returns the server reply as a file like object.
595 """
596 """
596 raise NotImplementedError()
597 raise NotImplementedError()
597
598
598 def _callpush(self, cmd, fp, **args):
599 def _callpush(self, cmd, fp, **args):
599 """execute a <cmd> on server
600 """execute a <cmd> on server
600
601
601 The command is expected to be related to a push. Push has a special
602 The command is expected to be related to a push. Push has a special
602 return method.
603 return method.
603
604
604 returns the server reply as a (ret, output) tuple. ret is either
605 returns the server reply as a (ret, output) tuple. ret is either
605 empty (error) or a stringified int.
606 empty (error) or a stringified int.
606 """
607 """
607 raise NotImplementedError()
608 raise NotImplementedError()
608
609
609 def _calltwowaystream(self, cmd, fp, **args):
610 def _calltwowaystream(self, cmd, fp, **args):
610 """execute <cmd> on server
611 """execute <cmd> on server
611
612
612 The command will send a stream to the server and get a stream in reply.
613 The command will send a stream to the server and get a stream in reply.
613 """
614 """
614 raise NotImplementedError()
615 raise NotImplementedError()
615
616
616 def _abort(self, exception):
617 def _abort(self, exception):
617 """clearly abort the wire protocol connection and raise the exception
618 """clearly abort the wire protocol connection and raise the exception
618 """
619 """
619 raise NotImplementedError()
620 raise NotImplementedError()
@@ -1,534 +1,534 b''
1 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
1 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
2 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
2 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 #
3 #
4 # This software may be used and distributed according to the terms of the
4 # This software may be used and distributed according to the terms of the
5 # GNU General Public License version 2 or any later version.
5 # GNU General Public License version 2 or any later version.
6
6
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import contextlib
9 import contextlib
10
10
11 from .i18n import _
11 from .i18n import _
12 from .thirdparty import (
12 from .thirdparty import (
13 cbor,
13 cbor,
14 )
14 )
15 from .thirdparty.zope import (
16 interface as zi,
17 )
18 from . import (
15 from . import (
19 encoding,
16 encoding,
20 error,
17 error,
21 pycompat,
18 pycompat,
22 streamclone,
19 streamclone,
23 util,
20 util,
24 wireprotoframing,
21 wireprotoframing,
25 wireprototypes,
22 wireprototypes,
26 )
23 )
24 from .utils import (
25 interfaceutil,
26 )
27
27
28 FRAMINGTYPE = b'application/mercurial-exp-framing-0005'
28 FRAMINGTYPE = b'application/mercurial-exp-framing-0005'
29
29
30 HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2
30 HTTP_WIREPROTO_V2 = wireprototypes.HTTP_WIREPROTO_V2
31
31
32 COMMANDS = wireprototypes.commanddict()
32 COMMANDS = wireprototypes.commanddict()
33
33
34 def handlehttpv2request(rctx, req, res, checkperm, urlparts):
34 def handlehttpv2request(rctx, req, res, checkperm, urlparts):
35 from .hgweb import common as hgwebcommon
35 from .hgweb import common as hgwebcommon
36
36
37 # URL space looks like: <permissions>/<command>, where <permission> can
37 # URL space looks like: <permissions>/<command>, where <permission> can
38 # be ``ro`` or ``rw`` to signal read-only or read-write, respectively.
38 # be ``ro`` or ``rw`` to signal read-only or read-write, respectively.
39
39
40 # Root URL does nothing meaningful... yet.
40 # Root URL does nothing meaningful... yet.
41 if not urlparts:
41 if not urlparts:
42 res.status = b'200 OK'
42 res.status = b'200 OK'
43 res.headers[b'Content-Type'] = b'text/plain'
43 res.headers[b'Content-Type'] = b'text/plain'
44 res.setbodybytes(_('HTTP version 2 API handler'))
44 res.setbodybytes(_('HTTP version 2 API handler'))
45 return
45 return
46
46
47 if len(urlparts) == 1:
47 if len(urlparts) == 1:
48 res.status = b'404 Not Found'
48 res.status = b'404 Not Found'
49 res.headers[b'Content-Type'] = b'text/plain'
49 res.headers[b'Content-Type'] = b'text/plain'
50 res.setbodybytes(_('do not know how to process %s\n') %
50 res.setbodybytes(_('do not know how to process %s\n') %
51 req.dispatchpath)
51 req.dispatchpath)
52 return
52 return
53
53
54 permission, command = urlparts[0:2]
54 permission, command = urlparts[0:2]
55
55
56 if permission not in (b'ro', b'rw'):
56 if permission not in (b'ro', b'rw'):
57 res.status = b'404 Not Found'
57 res.status = b'404 Not Found'
58 res.headers[b'Content-Type'] = b'text/plain'
58 res.headers[b'Content-Type'] = b'text/plain'
59 res.setbodybytes(_('unknown permission: %s') % permission)
59 res.setbodybytes(_('unknown permission: %s') % permission)
60 return
60 return
61
61
62 if req.method != 'POST':
62 if req.method != 'POST':
63 res.status = b'405 Method Not Allowed'
63 res.status = b'405 Method Not Allowed'
64 res.headers[b'Allow'] = b'POST'
64 res.headers[b'Allow'] = b'POST'
65 res.setbodybytes(_('commands require POST requests'))
65 res.setbodybytes(_('commands require POST requests'))
66 return
66 return
67
67
68 # At some point we'll want to use our own API instead of recycling the
68 # At some point we'll want to use our own API instead of recycling the
69 # behavior of version 1 of the wire protocol...
69 # behavior of version 1 of the wire protocol...
70 # TODO return reasonable responses - not responses that overload the
70 # TODO return reasonable responses - not responses that overload the
71 # HTTP status line message for error reporting.
71 # HTTP status line message for error reporting.
72 try:
72 try:
73 checkperm(rctx, req, 'pull' if permission == b'ro' else 'push')
73 checkperm(rctx, req, 'pull' if permission == b'ro' else 'push')
74 except hgwebcommon.ErrorResponse as e:
74 except hgwebcommon.ErrorResponse as e:
75 res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
75 res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
76 for k, v in e.headers:
76 for k, v in e.headers:
77 res.headers[k] = v
77 res.headers[k] = v
78 res.setbodybytes('permission denied')
78 res.setbodybytes('permission denied')
79 return
79 return
80
80
81 # We have a special endpoint to reflect the request back at the client.
81 # We have a special endpoint to reflect the request back at the client.
82 if command == b'debugreflect':
82 if command == b'debugreflect':
83 _processhttpv2reflectrequest(rctx.repo.ui, rctx.repo, req, res)
83 _processhttpv2reflectrequest(rctx.repo.ui, rctx.repo, req, res)
84 return
84 return
85
85
86 # Extra commands that we handle that aren't really wire protocol
86 # Extra commands that we handle that aren't really wire protocol
87 # commands. Think extra hard before making this hackery available to
87 # commands. Think extra hard before making this hackery available to
88 # extension.
88 # extension.
89 extracommands = {'multirequest'}
89 extracommands = {'multirequest'}
90
90
91 if command not in COMMANDS and command not in extracommands:
91 if command not in COMMANDS and command not in extracommands:
92 res.status = b'404 Not Found'
92 res.status = b'404 Not Found'
93 res.headers[b'Content-Type'] = b'text/plain'
93 res.headers[b'Content-Type'] = b'text/plain'
94 res.setbodybytes(_('unknown wire protocol command: %s\n') % command)
94 res.setbodybytes(_('unknown wire protocol command: %s\n') % command)
95 return
95 return
96
96
97 repo = rctx.repo
97 repo = rctx.repo
98 ui = repo.ui
98 ui = repo.ui
99
99
100 proto = httpv2protocolhandler(req, ui)
100 proto = httpv2protocolhandler(req, ui)
101
101
102 if (not COMMANDS.commandavailable(command, proto)
102 if (not COMMANDS.commandavailable(command, proto)
103 and command not in extracommands):
103 and command not in extracommands):
104 res.status = b'404 Not Found'
104 res.status = b'404 Not Found'
105 res.headers[b'Content-Type'] = b'text/plain'
105 res.headers[b'Content-Type'] = b'text/plain'
106 res.setbodybytes(_('invalid wire protocol command: %s') % command)
106 res.setbodybytes(_('invalid wire protocol command: %s') % command)
107 return
107 return
108
108
109 # TODO consider cases where proxies may add additional Accept headers.
109 # TODO consider cases where proxies may add additional Accept headers.
110 if req.headers.get(b'Accept') != FRAMINGTYPE:
110 if req.headers.get(b'Accept') != FRAMINGTYPE:
111 res.status = b'406 Not Acceptable'
111 res.status = b'406 Not Acceptable'
112 res.headers[b'Content-Type'] = b'text/plain'
112 res.headers[b'Content-Type'] = b'text/plain'
113 res.setbodybytes(_('client MUST specify Accept header with value: %s\n')
113 res.setbodybytes(_('client MUST specify Accept header with value: %s\n')
114 % FRAMINGTYPE)
114 % FRAMINGTYPE)
115 return
115 return
116
116
117 if req.headers.get(b'Content-Type') != FRAMINGTYPE:
117 if req.headers.get(b'Content-Type') != FRAMINGTYPE:
118 res.status = b'415 Unsupported Media Type'
118 res.status = b'415 Unsupported Media Type'
119 # TODO we should send a response with appropriate media type,
119 # TODO we should send a response with appropriate media type,
120 # since client does Accept it.
120 # since client does Accept it.
121 res.headers[b'Content-Type'] = b'text/plain'
121 res.headers[b'Content-Type'] = b'text/plain'
122 res.setbodybytes(_('client MUST send Content-Type header with '
122 res.setbodybytes(_('client MUST send Content-Type header with '
123 'value: %s\n') % FRAMINGTYPE)
123 'value: %s\n') % FRAMINGTYPE)
124 return
124 return
125
125
126 _processhttpv2request(ui, repo, req, res, permission, command, proto)
126 _processhttpv2request(ui, repo, req, res, permission, command, proto)
127
127
128 def _processhttpv2reflectrequest(ui, repo, req, res):
128 def _processhttpv2reflectrequest(ui, repo, req, res):
129 """Reads unified frame protocol request and dumps out state to client.
129 """Reads unified frame protocol request and dumps out state to client.
130
130
131 This special endpoint can be used to help debug the wire protocol.
131 This special endpoint can be used to help debug the wire protocol.
132
132
133 Instead of routing the request through the normal dispatch mechanism,
133 Instead of routing the request through the normal dispatch mechanism,
134 we instead read all frames, decode them, and feed them into our state
134 we instead read all frames, decode them, and feed them into our state
135 tracker. We then dump the log of all that activity back out to the
135 tracker. We then dump the log of all that activity back out to the
136 client.
136 client.
137 """
137 """
138 import json
138 import json
139
139
140 # Reflection APIs have a history of being abused, accidentally disclosing
140 # Reflection APIs have a history of being abused, accidentally disclosing
141 # sensitive data, etc. So we have a config knob.
141 # sensitive data, etc. So we have a config knob.
142 if not ui.configbool('experimental', 'web.api.debugreflect'):
142 if not ui.configbool('experimental', 'web.api.debugreflect'):
143 res.status = b'404 Not Found'
143 res.status = b'404 Not Found'
144 res.headers[b'Content-Type'] = b'text/plain'
144 res.headers[b'Content-Type'] = b'text/plain'
145 res.setbodybytes(_('debugreflect service not available'))
145 res.setbodybytes(_('debugreflect service not available'))
146 return
146 return
147
147
148 # We assume we have a unified framing protocol request body.
148 # We assume we have a unified framing protocol request body.
149
149
150 reactor = wireprotoframing.serverreactor()
150 reactor = wireprotoframing.serverreactor()
151 states = []
151 states = []
152
152
153 while True:
153 while True:
154 frame = wireprotoframing.readframe(req.bodyfh)
154 frame = wireprotoframing.readframe(req.bodyfh)
155
155
156 if not frame:
156 if not frame:
157 states.append(b'received: <no frame>')
157 states.append(b'received: <no frame>')
158 break
158 break
159
159
160 states.append(b'received: %d %d %d %s' % (frame.typeid, frame.flags,
160 states.append(b'received: %d %d %d %s' % (frame.typeid, frame.flags,
161 frame.requestid,
161 frame.requestid,
162 frame.payload))
162 frame.payload))
163
163
164 action, meta = reactor.onframerecv(frame)
164 action, meta = reactor.onframerecv(frame)
165 states.append(json.dumps((action, meta), sort_keys=True,
165 states.append(json.dumps((action, meta), sort_keys=True,
166 separators=(', ', ': ')))
166 separators=(', ', ': ')))
167
167
168 action, meta = reactor.oninputeof()
168 action, meta = reactor.oninputeof()
169 meta['action'] = action
169 meta['action'] = action
170 states.append(json.dumps(meta, sort_keys=True, separators=(', ',': ')))
170 states.append(json.dumps(meta, sort_keys=True, separators=(', ',': ')))
171
171
172 res.status = b'200 OK'
172 res.status = b'200 OK'
173 res.headers[b'Content-Type'] = b'text/plain'
173 res.headers[b'Content-Type'] = b'text/plain'
174 res.setbodybytes(b'\n'.join(states))
174 res.setbodybytes(b'\n'.join(states))
175
175
176 def _processhttpv2request(ui, repo, req, res, authedperm, reqcommand, proto):
176 def _processhttpv2request(ui, repo, req, res, authedperm, reqcommand, proto):
177 """Post-validation handler for HTTPv2 requests.
177 """Post-validation handler for HTTPv2 requests.
178
178
179 Called when the HTTP request contains unified frame-based protocol
179 Called when the HTTP request contains unified frame-based protocol
180 frames for evaluation.
180 frames for evaluation.
181 """
181 """
182 # TODO Some HTTP clients are full duplex and can receive data before
182 # TODO Some HTTP clients are full duplex and can receive data before
183 # the entire request is transmitted. Figure out a way to indicate support
183 # the entire request is transmitted. Figure out a way to indicate support
184 # for that so we can opt into full duplex mode.
184 # for that so we can opt into full duplex mode.
185 reactor = wireprotoframing.serverreactor(deferoutput=True)
185 reactor = wireprotoframing.serverreactor(deferoutput=True)
186 seencommand = False
186 seencommand = False
187
187
188 outstream = reactor.makeoutputstream()
188 outstream = reactor.makeoutputstream()
189
189
190 while True:
190 while True:
191 frame = wireprotoframing.readframe(req.bodyfh)
191 frame = wireprotoframing.readframe(req.bodyfh)
192 if not frame:
192 if not frame:
193 break
193 break
194
194
195 action, meta = reactor.onframerecv(frame)
195 action, meta = reactor.onframerecv(frame)
196
196
197 if action == 'wantframe':
197 if action == 'wantframe':
198 # Need more data before we can do anything.
198 # Need more data before we can do anything.
199 continue
199 continue
200 elif action == 'runcommand':
200 elif action == 'runcommand':
201 sentoutput = _httpv2runcommand(ui, repo, req, res, authedperm,
201 sentoutput = _httpv2runcommand(ui, repo, req, res, authedperm,
202 reqcommand, reactor, outstream,
202 reqcommand, reactor, outstream,
203 meta, issubsequent=seencommand)
203 meta, issubsequent=seencommand)
204
204
205 if sentoutput:
205 if sentoutput:
206 return
206 return
207
207
208 seencommand = True
208 seencommand = True
209
209
210 elif action == 'error':
210 elif action == 'error':
211 # TODO define proper error mechanism.
211 # TODO define proper error mechanism.
212 res.status = b'200 OK'
212 res.status = b'200 OK'
213 res.headers[b'Content-Type'] = b'text/plain'
213 res.headers[b'Content-Type'] = b'text/plain'
214 res.setbodybytes(meta['message'] + b'\n')
214 res.setbodybytes(meta['message'] + b'\n')
215 return
215 return
216 else:
216 else:
217 raise error.ProgrammingError(
217 raise error.ProgrammingError(
218 'unhandled action from frame processor: %s' % action)
218 'unhandled action from frame processor: %s' % action)
219
219
220 action, meta = reactor.oninputeof()
220 action, meta = reactor.oninputeof()
221 if action == 'sendframes':
221 if action == 'sendframes':
222 # We assume we haven't started sending the response yet. If we're
222 # We assume we haven't started sending the response yet. If we're
223 # wrong, the response type will raise an exception.
223 # wrong, the response type will raise an exception.
224 res.status = b'200 OK'
224 res.status = b'200 OK'
225 res.headers[b'Content-Type'] = FRAMINGTYPE
225 res.headers[b'Content-Type'] = FRAMINGTYPE
226 res.setbodygen(meta['framegen'])
226 res.setbodygen(meta['framegen'])
227 elif action == 'noop':
227 elif action == 'noop':
228 pass
228 pass
229 else:
229 else:
230 raise error.ProgrammingError('unhandled action from frame processor: %s'
230 raise error.ProgrammingError('unhandled action from frame processor: %s'
231 % action)
231 % action)
232
232
233 def _httpv2runcommand(ui, repo, req, res, authedperm, reqcommand, reactor,
233 def _httpv2runcommand(ui, repo, req, res, authedperm, reqcommand, reactor,
234 outstream, command, issubsequent):
234 outstream, command, issubsequent):
235 """Dispatch a wire protocol command made from HTTPv2 requests.
235 """Dispatch a wire protocol command made from HTTPv2 requests.
236
236
237 The authenticated permission (``authedperm``) along with the original
237 The authenticated permission (``authedperm``) along with the original
238 command from the URL (``reqcommand``) are passed in.
238 command from the URL (``reqcommand``) are passed in.
239 """
239 """
240 # We already validated that the session has permissions to perform the
240 # We already validated that the session has permissions to perform the
241 # actions in ``authedperm``. In the unified frame protocol, the canonical
241 # actions in ``authedperm``. In the unified frame protocol, the canonical
242 # command to run is expressed in a frame. However, the URL also requested
242 # command to run is expressed in a frame. However, the URL also requested
243 # to run a specific command. We need to be careful that the command we
243 # to run a specific command. We need to be careful that the command we
244 # run doesn't have permissions requirements greater than what was granted
244 # run doesn't have permissions requirements greater than what was granted
245 # by ``authedperm``.
245 # by ``authedperm``.
246 #
246 #
247 # Our rule for this is we only allow one command per HTTP request and
247 # Our rule for this is we only allow one command per HTTP request and
248 # that command must match the command in the URL. However, we make
248 # that command must match the command in the URL. However, we make
249 # an exception for the ``multirequest`` URL. This URL is allowed to
249 # an exception for the ``multirequest`` URL. This URL is allowed to
250 # execute multiple commands. We double check permissions of each command
250 # execute multiple commands. We double check permissions of each command
251 # as it is invoked to ensure there is no privilege escalation.
251 # as it is invoked to ensure there is no privilege escalation.
252 # TODO consider allowing multiple commands to regular command URLs
252 # TODO consider allowing multiple commands to regular command URLs
253 # iff each command is the same.
253 # iff each command is the same.
254
254
255 proto = httpv2protocolhandler(req, ui, args=command['args'])
255 proto = httpv2protocolhandler(req, ui, args=command['args'])
256
256
257 if reqcommand == b'multirequest':
257 if reqcommand == b'multirequest':
258 if not COMMANDS.commandavailable(command['command'], proto):
258 if not COMMANDS.commandavailable(command['command'], proto):
259 # TODO proper error mechanism
259 # TODO proper error mechanism
260 res.status = b'200 OK'
260 res.status = b'200 OK'
261 res.headers[b'Content-Type'] = b'text/plain'
261 res.headers[b'Content-Type'] = b'text/plain'
262 res.setbodybytes(_('wire protocol command not available: %s') %
262 res.setbodybytes(_('wire protocol command not available: %s') %
263 command['command'])
263 command['command'])
264 return True
264 return True
265
265
266 # TODO don't use assert here, since it may be elided by -O.
266 # TODO don't use assert here, since it may be elided by -O.
267 assert authedperm in (b'ro', b'rw')
267 assert authedperm in (b'ro', b'rw')
268 wirecommand = COMMANDS[command['command']]
268 wirecommand = COMMANDS[command['command']]
269 assert wirecommand.permission in ('push', 'pull')
269 assert wirecommand.permission in ('push', 'pull')
270
270
271 if authedperm == b'ro' and wirecommand.permission != 'pull':
271 if authedperm == b'ro' and wirecommand.permission != 'pull':
272 # TODO proper error mechanism
272 # TODO proper error mechanism
273 res.status = b'403 Forbidden'
273 res.status = b'403 Forbidden'
274 res.headers[b'Content-Type'] = b'text/plain'
274 res.headers[b'Content-Type'] = b'text/plain'
275 res.setbodybytes(_('insufficient permissions to execute '
275 res.setbodybytes(_('insufficient permissions to execute '
276 'command: %s') % command['command'])
276 'command: %s') % command['command'])
277 return True
277 return True
278
278
279 # TODO should we also call checkperm() here? Maybe not if we're going
279 # TODO should we also call checkperm() here? Maybe not if we're going
280 # to overhaul that API. The granted scope from the URL check should
280 # to overhaul that API. The granted scope from the URL check should
281 # be good enough.
281 # be good enough.
282
282
283 else:
283 else:
284 # Don't allow multiple commands outside of ``multirequest`` URL.
284 # Don't allow multiple commands outside of ``multirequest`` URL.
285 if issubsequent:
285 if issubsequent:
286 # TODO proper error mechanism
286 # TODO proper error mechanism
287 res.status = b'200 OK'
287 res.status = b'200 OK'
288 res.headers[b'Content-Type'] = b'text/plain'
288 res.headers[b'Content-Type'] = b'text/plain'
289 res.setbodybytes(_('multiple commands cannot be issued to this '
289 res.setbodybytes(_('multiple commands cannot be issued to this '
290 'URL'))
290 'URL'))
291 return True
291 return True
292
292
293 if reqcommand != command['command']:
293 if reqcommand != command['command']:
294 # TODO define proper error mechanism
294 # TODO define proper error mechanism
295 res.status = b'200 OK'
295 res.status = b'200 OK'
296 res.headers[b'Content-Type'] = b'text/plain'
296 res.headers[b'Content-Type'] = b'text/plain'
297 res.setbodybytes(_('command in frame must match command in URL'))
297 res.setbodybytes(_('command in frame must match command in URL'))
298 return True
298 return True
299
299
300 rsp = dispatch(repo, proto, command['command'])
300 rsp = dispatch(repo, proto, command['command'])
301
301
302 res.status = b'200 OK'
302 res.status = b'200 OK'
303 res.headers[b'Content-Type'] = FRAMINGTYPE
303 res.headers[b'Content-Type'] = FRAMINGTYPE
304
304
305 if isinstance(rsp, wireprototypes.cborresponse):
305 if isinstance(rsp, wireprototypes.cborresponse):
306 encoded = cbor.dumps(rsp.value, canonical=True)
306 encoded = cbor.dumps(rsp.value, canonical=True)
307 action, meta = reactor.oncommandresponseready(outstream,
307 action, meta = reactor.oncommandresponseready(outstream,
308 command['requestid'],
308 command['requestid'],
309 encoded)
309 encoded)
310 elif isinstance(rsp, wireprototypes.v2streamingresponse):
310 elif isinstance(rsp, wireprototypes.v2streamingresponse):
311 action, meta = reactor.oncommandresponsereadygen(outstream,
311 action, meta = reactor.oncommandresponsereadygen(outstream,
312 command['requestid'],
312 command['requestid'],
313 rsp.gen)
313 rsp.gen)
314 elif isinstance(rsp, wireprototypes.v2errorresponse):
314 elif isinstance(rsp, wireprototypes.v2errorresponse):
315 action, meta = reactor.oncommanderror(outstream,
315 action, meta = reactor.oncommanderror(outstream,
316 command['requestid'],
316 command['requestid'],
317 rsp.message,
317 rsp.message,
318 rsp.args)
318 rsp.args)
319 else:
319 else:
320 action, meta = reactor.onservererror(
320 action, meta = reactor.onservererror(
321 _('unhandled response type from wire proto command'))
321 _('unhandled response type from wire proto command'))
322
322
323 if action == 'sendframes':
323 if action == 'sendframes':
324 res.setbodygen(meta['framegen'])
324 res.setbodygen(meta['framegen'])
325 return True
325 return True
326 elif action == 'noop':
326 elif action == 'noop':
327 return False
327 return False
328 else:
328 else:
329 raise error.ProgrammingError('unhandled event from reactor: %s' %
329 raise error.ProgrammingError('unhandled event from reactor: %s' %
330 action)
330 action)
331
331
332 def getdispatchrepo(repo, proto, command):
332 def getdispatchrepo(repo, proto, command):
333 return repo.filtered('served')
333 return repo.filtered('served')
334
334
335 def dispatch(repo, proto, command):
335 def dispatch(repo, proto, command):
336 repo = getdispatchrepo(repo, proto, command)
336 repo = getdispatchrepo(repo, proto, command)
337
337
338 func, spec = COMMANDS[command]
338 func, spec = COMMANDS[command]
339 args = proto.getargs(spec)
339 args = proto.getargs(spec)
340
340
341 return func(repo, proto, **args)
341 return func(repo, proto, **args)
342
342
343 @zi.implementer(wireprototypes.baseprotocolhandler)
343 @interfaceutil.implementer(wireprototypes.baseprotocolhandler)
344 class httpv2protocolhandler(object):
344 class httpv2protocolhandler(object):
345 def __init__(self, req, ui, args=None):
345 def __init__(self, req, ui, args=None):
346 self._req = req
346 self._req = req
347 self._ui = ui
347 self._ui = ui
348 self._args = args
348 self._args = args
349
349
350 @property
350 @property
351 def name(self):
351 def name(self):
352 return HTTP_WIREPROTO_V2
352 return HTTP_WIREPROTO_V2
353
353
354 def getargs(self, args):
354 def getargs(self, args):
355 data = {}
355 data = {}
356 for k, typ in args.items():
356 for k, typ in args.items():
357 if k == '*':
357 if k == '*':
358 raise NotImplementedError('do not support * args')
358 raise NotImplementedError('do not support * args')
359 elif k in self._args:
359 elif k in self._args:
360 # TODO consider validating value types.
360 # TODO consider validating value types.
361 data[k] = self._args[k]
361 data[k] = self._args[k]
362
362
363 return data
363 return data
364
364
365 def getprotocaps(self):
365 def getprotocaps(self):
366 # Protocol capabilities are currently not implemented for HTTP V2.
366 # Protocol capabilities are currently not implemented for HTTP V2.
367 return set()
367 return set()
368
368
369 def getpayload(self):
369 def getpayload(self):
370 raise NotImplementedError
370 raise NotImplementedError
371
371
372 @contextlib.contextmanager
372 @contextlib.contextmanager
373 def mayberedirectstdio(self):
373 def mayberedirectstdio(self):
374 raise NotImplementedError
374 raise NotImplementedError
375
375
376 def client(self):
376 def client(self):
377 raise NotImplementedError
377 raise NotImplementedError
378
378
379 def addcapabilities(self, repo, caps):
379 def addcapabilities(self, repo, caps):
380 return caps
380 return caps
381
381
382 def checkperm(self, perm):
382 def checkperm(self, perm):
383 raise NotImplementedError
383 raise NotImplementedError
384
384
385 def httpv2apidescriptor(req, repo):
385 def httpv2apidescriptor(req, repo):
386 proto = httpv2protocolhandler(req, repo.ui)
386 proto = httpv2protocolhandler(req, repo.ui)
387
387
388 return _capabilitiesv2(repo, proto)
388 return _capabilitiesv2(repo, proto)
389
389
390 def _capabilitiesv2(repo, proto):
390 def _capabilitiesv2(repo, proto):
391 """Obtain the set of capabilities for version 2 transports.
391 """Obtain the set of capabilities for version 2 transports.
392
392
393 These capabilities are distinct from the capabilities for version 1
393 These capabilities are distinct from the capabilities for version 1
394 transports.
394 transports.
395 """
395 """
396 compression = []
396 compression = []
397 for engine in wireprototypes.supportedcompengines(repo.ui, util.SERVERROLE):
397 for engine in wireprototypes.supportedcompengines(repo.ui, util.SERVERROLE):
398 compression.append({
398 compression.append({
399 b'name': engine.wireprotosupport().name,
399 b'name': engine.wireprotosupport().name,
400 })
400 })
401
401
402 caps = {
402 caps = {
403 'commands': {},
403 'commands': {},
404 'compression': compression,
404 'compression': compression,
405 'framingmediatypes': [FRAMINGTYPE],
405 'framingmediatypes': [FRAMINGTYPE],
406 }
406 }
407
407
408 for command, entry in COMMANDS.items():
408 for command, entry in COMMANDS.items():
409 caps['commands'][command] = {
409 caps['commands'][command] = {
410 'args': entry.args,
410 'args': entry.args,
411 'permissions': [entry.permission],
411 'permissions': [entry.permission],
412 }
412 }
413
413
414 if streamclone.allowservergeneration(repo):
414 if streamclone.allowservergeneration(repo):
415 caps['rawrepoformats'] = sorted(repo.requirements &
415 caps['rawrepoformats'] = sorted(repo.requirements &
416 repo.supportedformats)
416 repo.supportedformats)
417
417
418 return proto.addcapabilities(repo, caps)
418 return proto.addcapabilities(repo, caps)
419
419
420 def wireprotocommand(name, args=None, permission='push'):
420 def wireprotocommand(name, args=None, permission='push'):
421 """Decorator to declare a wire protocol command.
421 """Decorator to declare a wire protocol command.
422
422
423 ``name`` is the name of the wire protocol command being provided.
423 ``name`` is the name of the wire protocol command being provided.
424
424
425 ``args`` is a dict of argument names to example values.
425 ``args`` is a dict of argument names to example values.
426
426
427 ``permission`` defines the permission type needed to run this command.
427 ``permission`` defines the permission type needed to run this command.
428 Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
428 Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
429 respectively. Default is to assume command requires ``push`` permissions
429 respectively. Default is to assume command requires ``push`` permissions
430 because otherwise commands not declaring their permissions could modify
430 because otherwise commands not declaring their permissions could modify
431 a repository that is supposed to be read-only.
431 a repository that is supposed to be read-only.
432 """
432 """
433 transports = {k for k, v in wireprototypes.TRANSPORTS.items()
433 transports = {k for k, v in wireprototypes.TRANSPORTS.items()
434 if v['version'] == 2}
434 if v['version'] == 2}
435
435
436 if permission not in ('push', 'pull'):
436 if permission not in ('push', 'pull'):
437 raise error.ProgrammingError('invalid wire protocol permission; '
437 raise error.ProgrammingError('invalid wire protocol permission; '
438 'got %s; expected "push" or "pull"' %
438 'got %s; expected "push" or "pull"' %
439 permission)
439 permission)
440
440
441 if args is None:
441 if args is None:
442 args = {}
442 args = {}
443
443
444 if not isinstance(args, dict):
444 if not isinstance(args, dict):
445 raise error.ProgrammingError('arguments for version 2 commands '
445 raise error.ProgrammingError('arguments for version 2 commands '
446 'must be declared as dicts')
446 'must be declared as dicts')
447
447
448 def register(func):
448 def register(func):
449 if name in COMMANDS:
449 if name in COMMANDS:
450 raise error.ProgrammingError('%s command already registered '
450 raise error.ProgrammingError('%s command already registered '
451 'for version 2' % name)
451 'for version 2' % name)
452
452
453 COMMANDS[name] = wireprototypes.commandentry(
453 COMMANDS[name] = wireprototypes.commandentry(
454 func, args=args, transports=transports, permission=permission)
454 func, args=args, transports=transports, permission=permission)
455
455
456 return func
456 return func
457
457
458 return register
458 return register
459
459
460 @wireprotocommand('branchmap', permission='pull')
460 @wireprotocommand('branchmap', permission='pull')
461 def branchmapv2(repo, proto):
461 def branchmapv2(repo, proto):
462 branchmap = {encoding.fromlocal(k): v
462 branchmap = {encoding.fromlocal(k): v
463 for k, v in repo.branchmap().iteritems()}
463 for k, v in repo.branchmap().iteritems()}
464
464
465 return wireprototypes.cborresponse(branchmap)
465 return wireprototypes.cborresponse(branchmap)
466
466
467 @wireprotocommand('capabilities', permission='pull')
467 @wireprotocommand('capabilities', permission='pull')
468 def capabilitiesv2(repo, proto):
468 def capabilitiesv2(repo, proto):
469 caps = _capabilitiesv2(repo, proto)
469 caps = _capabilitiesv2(repo, proto)
470
470
471 return wireprototypes.cborresponse(caps)
471 return wireprototypes.cborresponse(caps)
472
472
473 @wireprotocommand('heads',
473 @wireprotocommand('heads',
474 args={
474 args={
475 'publiconly': False,
475 'publiconly': False,
476 },
476 },
477 permission='pull')
477 permission='pull')
478 def headsv2(repo, proto, publiconly=False):
478 def headsv2(repo, proto, publiconly=False):
479 if publiconly:
479 if publiconly:
480 repo = repo.filtered('immutable')
480 repo = repo.filtered('immutable')
481
481
482 return wireprototypes.cborresponse(repo.heads())
482 return wireprototypes.cborresponse(repo.heads())
483
483
484 @wireprotocommand('known',
484 @wireprotocommand('known',
485 args={
485 args={
486 'nodes': [b'deadbeef'],
486 'nodes': [b'deadbeef'],
487 },
487 },
488 permission='pull')
488 permission='pull')
489 def knownv2(repo, proto, nodes=None):
489 def knownv2(repo, proto, nodes=None):
490 nodes = nodes or []
490 nodes = nodes or []
491 result = b''.join(b'1' if n else b'0' for n in repo.known(nodes))
491 result = b''.join(b'1' if n else b'0' for n in repo.known(nodes))
492 return wireprototypes.cborresponse(result)
492 return wireprototypes.cborresponse(result)
493
493
494 @wireprotocommand('listkeys',
494 @wireprotocommand('listkeys',
495 args={
495 args={
496 'namespace': b'ns',
496 'namespace': b'ns',
497 },
497 },
498 permission='pull')
498 permission='pull')
499 def listkeysv2(repo, proto, namespace=None):
499 def listkeysv2(repo, proto, namespace=None):
500 keys = repo.listkeys(encoding.tolocal(namespace))
500 keys = repo.listkeys(encoding.tolocal(namespace))
501 keys = {encoding.fromlocal(k): encoding.fromlocal(v)
501 keys = {encoding.fromlocal(k): encoding.fromlocal(v)
502 for k, v in keys.iteritems()}
502 for k, v in keys.iteritems()}
503
503
504 return wireprototypes.cborresponse(keys)
504 return wireprototypes.cborresponse(keys)
505
505
506 @wireprotocommand('lookup',
506 @wireprotocommand('lookup',
507 args={
507 args={
508 'key': b'foo',
508 'key': b'foo',
509 },
509 },
510 permission='pull')
510 permission='pull')
511 def lookupv2(repo, proto, key):
511 def lookupv2(repo, proto, key):
512 key = encoding.tolocal(key)
512 key = encoding.tolocal(key)
513
513
514 # TODO handle exception.
514 # TODO handle exception.
515 node = repo.lookup(key)
515 node = repo.lookup(key)
516
516
517 return wireprototypes.cborresponse(node)
517 return wireprototypes.cborresponse(node)
518
518
519 @wireprotocommand('pushkey',
519 @wireprotocommand('pushkey',
520 args={
520 args={
521 'namespace': b'ns',
521 'namespace': b'ns',
522 'key': b'key',
522 'key': b'key',
523 'old': b'old',
523 'old': b'old',
524 'new': b'new',
524 'new': b'new',
525 },
525 },
526 permission='push')
526 permission='push')
527 def pushkeyv2(repo, proto, namespace, key, old, new):
527 def pushkeyv2(repo, proto, namespace, key, old, new):
528 # TODO handle ui output redirection
528 # TODO handle ui output redirection
529 r = repo.pushkey(encoding.tolocal(namespace),
529 r = repo.pushkey(encoding.tolocal(namespace),
530 encoding.tolocal(key),
530 encoding.tolocal(key),
531 encoding.tolocal(old),
531 encoding.tolocal(old),
532 encoding.tolocal(new))
532 encoding.tolocal(new))
533
533
534 return wireprototypes.cborresponse(r)
534 return wireprototypes.cborresponse(r)
@@ -1,160 +1,163 b''
1 # Test that certain objects conform to well-defined interfaces.
1 # Test that certain objects conform to well-defined interfaces.
2
2
3 from __future__ import absolute_import, print_function
3 from __future__ import absolute_import, print_function
4
4
5 from mercurial import encoding
6 encoding.environ[b'HGREALINTERFACES'] = b'1'
7
5 import os
8 import os
6
9
7 from mercurial.thirdparty.zope import (
10 from mercurial.thirdparty.zope import (
8 interface as zi,
11 interface as zi,
9 )
12 )
10 from mercurial.thirdparty.zope.interface import (
13 from mercurial.thirdparty.zope.interface import (
11 verify as ziverify,
14 verify as ziverify,
12 )
15 )
13 from mercurial import (
16 from mercurial import (
14 bundlerepo,
17 bundlerepo,
15 filelog,
18 filelog,
16 httppeer,
19 httppeer,
17 localrepo,
20 localrepo,
18 repository,
21 repository,
19 sshpeer,
22 sshpeer,
20 statichttprepo,
23 statichttprepo,
21 ui as uimod,
24 ui as uimod,
22 unionrepo,
25 unionrepo,
23 vfs as vfsmod,
26 vfs as vfsmod,
24 wireprotoserver,
27 wireprotoserver,
25 wireprototypes,
28 wireprototypes,
26 wireprotov1peer,
29 wireprotov1peer,
27 wireprotov2server,
30 wireprotov2server,
28 )
31 )
29
32
30 rootdir = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
33 rootdir = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
31
34
32 def checkzobject(o, allowextra=False):
35 def checkzobject(o, allowextra=False):
33 """Verify an object with a zope interface."""
36 """Verify an object with a zope interface."""
34 ifaces = zi.providedBy(o)
37 ifaces = zi.providedBy(o)
35 if not ifaces:
38 if not ifaces:
36 print('%r does not provide any zope interfaces' % o)
39 print('%r does not provide any zope interfaces' % o)
37 return
40 return
38
41
39 # Run zope.interface's built-in verification routine. This verifies that
42 # Run zope.interface's built-in verification routine. This verifies that
40 # everything that is supposed to be present is present.
43 # everything that is supposed to be present is present.
41 for iface in ifaces:
44 for iface in ifaces:
42 ziverify.verifyObject(iface, o)
45 ziverify.verifyObject(iface, o)
43
46
44 if allowextra:
47 if allowextra:
45 return
48 return
46
49
47 # Now verify that the object provides no extra public attributes that
50 # Now verify that the object provides no extra public attributes that
48 # aren't declared as part of interfaces.
51 # aren't declared as part of interfaces.
49 allowed = set()
52 allowed = set()
50 for iface in ifaces:
53 for iface in ifaces:
51 allowed |= set(iface.names(all=True))
54 allowed |= set(iface.names(all=True))
52
55
53 public = {a for a in dir(o) if not a.startswith('_')}
56 public = {a for a in dir(o) if not a.startswith('_')}
54
57
55 for attr in sorted(public - allowed):
58 for attr in sorted(public - allowed):
56 print('public attribute not declared in interfaces: %s.%s' % (
59 print('public attribute not declared in interfaces: %s.%s' % (
57 o.__class__.__name__, attr))
60 o.__class__.__name__, attr))
58
61
59 # Facilitates testing localpeer.
62 # Facilitates testing localpeer.
60 class dummyrepo(object):
63 class dummyrepo(object):
61 def __init__(self):
64 def __init__(self):
62 self.ui = uimod.ui()
65 self.ui = uimod.ui()
63 def filtered(self, name):
66 def filtered(self, name):
64 pass
67 pass
65 def _restrictcapabilities(self, caps):
68 def _restrictcapabilities(self, caps):
66 pass
69 pass
67
70
68 class dummyopener(object):
71 class dummyopener(object):
69 handlers = []
72 handlers = []
70
73
71 # Facilitates testing sshpeer without requiring a server.
74 # Facilitates testing sshpeer without requiring a server.
72 class badpeer(httppeer.httppeer):
75 class badpeer(httppeer.httppeer):
73 def __init__(self):
76 def __init__(self):
74 super(badpeer, self).__init__(None, None, None, dummyopener(), None,
77 super(badpeer, self).__init__(None, None, None, dummyopener(), None,
75 None)
78 None)
76 self.badattribute = True
79 self.badattribute = True
77
80
78 def badmethod(self):
81 def badmethod(self):
79 pass
82 pass
80
83
81 class dummypipe(object):
84 class dummypipe(object):
82 def close(self):
85 def close(self):
83 pass
86 pass
84
87
85 def main():
88 def main():
86 ui = uimod.ui()
89 ui = uimod.ui()
87 # Needed so we can open a local repo with obsstore without a warning.
90 # Needed so we can open a local repo with obsstore without a warning.
88 ui.setconfig('experimental', 'evolution.createmarkers', True)
91 ui.setconfig('experimental', 'evolution.createmarkers', True)
89
92
90 checkzobject(badpeer())
93 checkzobject(badpeer())
91
94
92 ziverify.verifyClass(repository.ipeerbase, httppeer.httppeer)
95 ziverify.verifyClass(repository.ipeerbase, httppeer.httppeer)
93 checkzobject(httppeer.httppeer(None, None, None, dummyopener(), None, None))
96 checkzobject(httppeer.httppeer(None, None, None, dummyopener(), None, None))
94
97
95 ziverify.verifyClass(repository.ipeerconnection,
98 ziverify.verifyClass(repository.ipeerconnection,
96 httppeer.httpv2peer)
99 httppeer.httpv2peer)
97 ziverify.verifyClass(repository.ipeercapabilities,
100 ziverify.verifyClass(repository.ipeercapabilities,
98 httppeer.httpv2peer)
101 httppeer.httpv2peer)
99 checkzobject(httppeer.httpv2peer(None, '', None, None, None, None))
102 checkzobject(httppeer.httpv2peer(None, '', None, None, None, None))
100
103
101 ziverify.verifyClass(repository.ipeerbase,
104 ziverify.verifyClass(repository.ipeerbase,
102 localrepo.localpeer)
105 localrepo.localpeer)
103 checkzobject(localrepo.localpeer(dummyrepo()))
106 checkzobject(localrepo.localpeer(dummyrepo()))
104
107
105 ziverify.verifyClass(repository.ipeercommandexecutor,
108 ziverify.verifyClass(repository.ipeercommandexecutor,
106 localrepo.localcommandexecutor)
109 localrepo.localcommandexecutor)
107 checkzobject(localrepo.localcommandexecutor(None))
110 checkzobject(localrepo.localcommandexecutor(None))
108
111
109 ziverify.verifyClass(repository.ipeercommandexecutor,
112 ziverify.verifyClass(repository.ipeercommandexecutor,
110 wireprotov1peer.peerexecutor)
113 wireprotov1peer.peerexecutor)
111 checkzobject(wireprotov1peer.peerexecutor(None))
114 checkzobject(wireprotov1peer.peerexecutor(None))
112
115
113 ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv1peer)
116 ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv1peer)
114 checkzobject(sshpeer.sshv1peer(ui, 'ssh://localhost/foo', None, dummypipe(),
117 checkzobject(sshpeer.sshv1peer(ui, 'ssh://localhost/foo', None, dummypipe(),
115 dummypipe(), None, None))
118 dummypipe(), None, None))
116
119
117 ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv2peer)
120 ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv2peer)
118 checkzobject(sshpeer.sshv2peer(ui, 'ssh://localhost/foo', None, dummypipe(),
121 checkzobject(sshpeer.sshv2peer(ui, 'ssh://localhost/foo', None, dummypipe(),
119 dummypipe(), None, None))
122 dummypipe(), None, None))
120
123
121 ziverify.verifyClass(repository.ipeerbase, bundlerepo.bundlepeer)
124 ziverify.verifyClass(repository.ipeerbase, bundlerepo.bundlepeer)
122 checkzobject(bundlerepo.bundlepeer(dummyrepo()))
125 checkzobject(bundlerepo.bundlepeer(dummyrepo()))
123
126
124 ziverify.verifyClass(repository.ipeerbase, statichttprepo.statichttppeer)
127 ziverify.verifyClass(repository.ipeerbase, statichttprepo.statichttppeer)
125 checkzobject(statichttprepo.statichttppeer(dummyrepo()))
128 checkzobject(statichttprepo.statichttppeer(dummyrepo()))
126
129
127 ziverify.verifyClass(repository.ipeerbase, unionrepo.unionpeer)
130 ziverify.verifyClass(repository.ipeerbase, unionrepo.unionpeer)
128 checkzobject(unionrepo.unionpeer(dummyrepo()))
131 checkzobject(unionrepo.unionpeer(dummyrepo()))
129
132
130 ziverify.verifyClass(repository.completelocalrepository,
133 ziverify.verifyClass(repository.completelocalrepository,
131 localrepo.localrepository)
134 localrepo.localrepository)
132 repo = localrepo.localrepository(ui, rootdir)
135 repo = localrepo.localrepository(ui, rootdir)
133 checkzobject(repo)
136 checkzobject(repo)
134
137
135 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
138 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
136 wireprotoserver.sshv1protocolhandler)
139 wireprotoserver.sshv1protocolhandler)
137 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
140 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
138 wireprotoserver.sshv2protocolhandler)
141 wireprotoserver.sshv2protocolhandler)
139 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
142 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
140 wireprotoserver.httpv1protocolhandler)
143 wireprotoserver.httpv1protocolhandler)
141 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
144 ziverify.verifyClass(wireprototypes.baseprotocolhandler,
142 wireprotov2server.httpv2protocolhandler)
145 wireprotov2server.httpv2protocolhandler)
143
146
144 sshv1 = wireprotoserver.sshv1protocolhandler(None, None, None)
147 sshv1 = wireprotoserver.sshv1protocolhandler(None, None, None)
145 checkzobject(sshv1)
148 checkzobject(sshv1)
146 sshv2 = wireprotoserver.sshv2protocolhandler(None, None, None)
149 sshv2 = wireprotoserver.sshv2protocolhandler(None, None, None)
147 checkzobject(sshv2)
150 checkzobject(sshv2)
148
151
149 httpv1 = wireprotoserver.httpv1protocolhandler(None, None, None)
152 httpv1 = wireprotoserver.httpv1protocolhandler(None, None, None)
150 checkzobject(httpv1)
153 checkzobject(httpv1)
151 httpv2 = wireprotov2server.httpv2protocolhandler(None, None)
154 httpv2 = wireprotov2server.httpv2protocolhandler(None, None)
152 checkzobject(httpv2)
155 checkzobject(httpv2)
153
156
154 ziverify.verifyClass(repository.ifilestorage, filelog.filelog)
157 ziverify.verifyClass(repository.ifilestorage, filelog.filelog)
155
158
156 vfs = vfsmod.vfs('.')
159 vfs = vfsmod.vfs('.')
157 fl = filelog.filelog(vfs, 'dummy.i')
160 fl = filelog.filelog(vfs, 'dummy.i')
158 checkzobject(fl, allowextra=True)
161 checkzobject(fl, allowextra=True)
159
162
160 main()
163 main()
@@ -1,44 +1,45 b''
1 #require test-repo
1 #require test-repo
2
2
3 $ . "$TESTDIR/helpers-testrepo.sh"
3 $ . "$TESTDIR/helpers-testrepo.sh"
4 $ import_checker="$TESTDIR"/../contrib/import-checker.py
4 $ import_checker="$TESTDIR"/../contrib/import-checker.py
5
5
6 $ cd "$TESTDIR"/..
6 $ cd "$TESTDIR"/..
7
7
8 There are a handful of cases here that require renaming a module so it
8 There are a handful of cases here that require renaming a module so it
9 doesn't overlap with a stdlib module name. There are also some cycles
9 doesn't overlap with a stdlib module name. There are also some cycles
10 here that we should still endeavor to fix, and some cycles will be
10 here that we should still endeavor to fix, and some cycles will be
11 hidden by deduplication algorithm in the cycle detector, so fixing
11 hidden by deduplication algorithm in the cycle detector, so fixing
12 these may expose other cycles.
12 these may expose other cycles.
13
13
14 Known-bad files are excluded by -X as some of them would produce unstable
14 Known-bad files are excluded by -X as some of them would produce unstable
15 outputs, which should be fixed later.
15 outputs, which should be fixed later.
16
16
17 $ testrepohg locate 'set:**.py or grep(r"^#!.*?python")' \
17 $ testrepohg locate 'set:**.py or grep(r"^#!.*?python")' \
18 > 'tests/**.t' \
18 > 'tests/**.t' \
19 > -X hgweb.cgi \
19 > -X hgweb.cgi \
20 > -X setup.py \
20 > -X setup.py \
21 > -X contrib/debugshell.py \
21 > -X contrib/debugshell.py \
22 > -X contrib/hgweb.fcgi \
22 > -X contrib/hgweb.fcgi \
23 > -X contrib/python-zstandard/ \
23 > -X contrib/python-zstandard/ \
24 > -X contrib/win32/hgwebdir_wsgi.py \
24 > -X contrib/win32/hgwebdir_wsgi.py \
25 > -X doc/gendoc.py \
25 > -X doc/gendoc.py \
26 > -X doc/hgmanpage.py \
26 > -X doc/hgmanpage.py \
27 > -X i18n/posplit \
27 > -X i18n/posplit \
28 > -X mercurial/thirdparty \
28 > -X mercurial/thirdparty \
29 > -X tests/hypothesishelpers.py \
29 > -X tests/hypothesishelpers.py \
30 > -X tests/test-check-interfaces.py \
30 > -X tests/test-commit-interactive.t \
31 > -X tests/test-commit-interactive.t \
31 > -X tests/test-contrib-check-code.t \
32 > -X tests/test-contrib-check-code.t \
32 > -X tests/test-demandimport.py \
33 > -X tests/test-demandimport.py \
33 > -X tests/test-extension.t \
34 > -X tests/test-extension.t \
34 > -X tests/test-hghave.t \
35 > -X tests/test-hghave.t \
35 > -X tests/test-hgweb-auth.py \
36 > -X tests/test-hgweb-auth.py \
36 > -X tests/test-hgweb-no-path-info.t \
37 > -X tests/test-hgweb-no-path-info.t \
37 > -X tests/test-hgweb-no-request-uri.t \
38 > -X tests/test-hgweb-no-request-uri.t \
38 > -X tests/test-hgweb-non-interactive.t \
39 > -X tests/test-hgweb-non-interactive.t \
39 > -X tests/test-hook.t \
40 > -X tests/test-hook.t \
40 > -X tests/test-import.t \
41 > -X tests/test-import.t \
41 > -X tests/test-imports-checker.t \
42 > -X tests/test-imports-checker.t \
42 > -X tests/test-lock.py \
43 > -X tests/test-lock.py \
43 > -X tests/test-verify-repo-operations.py \
44 > -X tests/test-verify-repo-operations.py \
44 > | sed 's-\\-/-g' | $PYTHON "$import_checker" -
45 > | sed 's-\\-/-g' | $PYTHON "$import_checker" -
General Comments 0
You need to be logged in to leave comments. Login now