thirdparty: move selectors2 module to where it should be
Yuya Nishihara
r35245:414114a7 default
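For reference (not part of this changeset): the moved module keeps the names that commandserver relies on (DefaultSelector, EVENT_READ, ...), matching the standard library selectors module it back-ports, so callers only need the import-path change shown below. A minimal usage sketch, with the stdlib selectors module standing in for the vendored selectors2:

    import selectors  # stdlib module with the same core names as the vendored selectors2
    import socket

    # register a listening socket and poll it, the way _mainloop() in
    # commandserver.py does with selectors2.DefaultSelector()
    sel = selectors.DefaultSelector()
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind(('127.0.0.1', 0))
    srv.listen(5)
    sel.register(srv, selectors.EVENT_READ)

    ready = sel.select(timeout=0.1)  # [] on timeout, [(key, events), ...] when a client connects
    for key, _events in ready:
        conn, _addr = key.fileobj.accept()
        conn.close()

    sel.close()
    srv.close()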
mercurial/__init__.py
@@ -1,299 +1,296 @@
1 # __init__.py - Startup and module loading logic for Mercurial.
1 # __init__.py - Startup and module loading logic for Mercurial.
2 #
2 #
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import sys
10 import sys
11
11
12 # Allow 'from mercurial import demandimport' to keep working.
12 # Allow 'from mercurial import demandimport' to keep working.
13 import hgdemandimport
13 import hgdemandimport
14 demandimport = hgdemandimport
14 demandimport = hgdemandimport
15
15
16 __all__ = []
16 __all__ = []
17
17
18 # Python 3 uses a custom module loader that transforms source code between
18 # Python 3 uses a custom module loader that transforms source code between
19 # source file reading and compilation. This is done by registering a custom
19 # source file reading and compilation. This is done by registering a custom
20 # finder that changes the spec for Mercurial modules to use a custom loader.
20 # finder that changes the spec for Mercurial modules to use a custom loader.
21 if sys.version_info[0] >= 3:
21 if sys.version_info[0] >= 3:
22 import importlib
22 import importlib
23 import importlib.abc
23 import importlib.abc
24 import io
24 import io
25 import token
25 import token
26 import tokenize
26 import tokenize
27
27
28 class hgpathentryfinder(importlib.abc.MetaPathFinder):
28 class hgpathentryfinder(importlib.abc.MetaPathFinder):
29 """A sys.meta_path finder that uses a custom module loader."""
29 """A sys.meta_path finder that uses a custom module loader."""
30 def find_spec(self, fullname, path, target=None):
30 def find_spec(self, fullname, path, target=None):
31 # Only handle Mercurial-related modules.
31 # Only handle Mercurial-related modules.
32 if not fullname.startswith(('mercurial.', 'hgext.', 'hgext3rd.')):
32 if not fullname.startswith(('mercurial.', 'hgext.', 'hgext3rd.')):
33 return None
33 return None
34 # selectors2 is already dual-version clean, don't try and mangle it
35 if fullname.startswith('mercurial.selectors2'):
36 return None
37 # third-party packages are expected to be dual-version clean
34 # third-party packages are expected to be dual-version clean
38 if fullname.startswith('mercurial.thirdparty'):
35 if fullname.startswith('mercurial.thirdparty'):
39 return None
36 return None
40 # zstd is already dual-version clean, don't try and mangle it
37 # zstd is already dual-version clean, don't try and mangle it
41 if fullname.startswith('mercurial.zstd'):
38 if fullname.startswith('mercurial.zstd'):
42 return None
39 return None
43 # pywatchman is already dual-version clean, don't try and mangle it
40 # pywatchman is already dual-version clean, don't try and mangle it
44 if fullname.startswith('hgext.fsmonitor.pywatchman'):
41 if fullname.startswith('hgext.fsmonitor.pywatchman'):
45 return None
42 return None
46
43
47 # Try to find the module using other registered finders.
44 # Try to find the module using other registered finders.
48 spec = None
45 spec = None
49 for finder in sys.meta_path:
46 for finder in sys.meta_path:
50 if finder == self:
47 if finder == self:
51 continue
48 continue
52
49
53 spec = finder.find_spec(fullname, path, target=target)
50 spec = finder.find_spec(fullname, path, target=target)
54 if spec:
51 if spec:
55 break
52 break
56
53
57 # This is a Mercurial-related module but we couldn't find it
54 # This is a Mercurial-related module but we couldn't find it
58 # using the previously-registered finders. This likely means
55 # using the previously-registered finders. This likely means
59 # the module doesn't exist.
56 # the module doesn't exist.
60 if not spec:
57 if not spec:
61 return None
58 return None
62
59
63 # TODO need to support loaders from alternate specs, like zip
60 # TODO need to support loaders from alternate specs, like zip
64 # loaders.
61 # loaders.
65 loader = hgloader(spec.name, spec.origin)
62 loader = hgloader(spec.name, spec.origin)
66 # Can't use util.safehasattr here because that would require
63 # Can't use util.safehasattr here because that would require
67 # importing util, and we're in import code.
64 # importing util, and we're in import code.
68 if hasattr(spec.loader, 'loader'): # hasattr-py3-only
65 if hasattr(spec.loader, 'loader'): # hasattr-py3-only
69 # This is a nested loader (maybe a lazy loader?)
66 # This is a nested loader (maybe a lazy loader?)
70 spec.loader.loader = loader
67 spec.loader.loader = loader
71 else:
68 else:
72 spec.loader = loader
69 spec.loader = loader
73 return spec
70 return spec
74
71
75 def replacetokens(tokens, fullname):
72 def replacetokens(tokens, fullname):
76 """Transform a stream of tokens from raw to Python 3.
73 """Transform a stream of tokens from raw to Python 3.
77
74
78 It is called by the custom module loading machinery to rewrite
75 It is called by the custom module loading machinery to rewrite
79 source/tokens between source decoding and compilation.
76 source/tokens between source decoding and compilation.
80
77
81 Returns a generator of possibly rewritten tokens.
78 Returns a generator of possibly rewritten tokens.
82
79
83 The input token list may be mutated as part of processing. However,
80 The input token list may be mutated as part of processing. However,
84 its changes do not necessarily match the output token stream.
81 its changes do not necessarily match the output token stream.
85
82
86 REMEMBER TO CHANGE ``BYTECODEHEADER`` WHEN CHANGING THIS FUNCTION
83 REMEMBER TO CHANGE ``BYTECODEHEADER`` WHEN CHANGING THIS FUNCTION
87 OR CACHED FILES WON'T GET INVALIDATED PROPERLY.
84 OR CACHED FILES WON'T GET INVALIDATED PROPERLY.
88 """
85 """
89 futureimpline = False
86 futureimpline = False
90
87
91 # The following utility functions access the tokens list and i index of
88 # The following utility functions access the tokens list and i index of
92 # the for i, t enumerate(tokens) loop below
89 # the for i, t enumerate(tokens) loop below
93 def _isop(j, *o):
90 def _isop(j, *o):
94 """Assert that tokens[j] is an OP with one of the given values"""
91 """Assert that tokens[j] is an OP with one of the given values"""
95 try:
92 try:
96 return tokens[j].type == token.OP and tokens[j].string in o
93 return tokens[j].type == token.OP and tokens[j].string in o
97 except IndexError:
94 except IndexError:
98 return False
95 return False
99
96
100 def _findargnofcall(n):
97 def _findargnofcall(n):
101 """Find arg n of a call expression (start at 0)
98 """Find arg n of a call expression (start at 0)
102
99
103 Returns index of the first token of that argument, or None if
100 Returns index of the first token of that argument, or None if
104 there is not that many arguments.
101 there is not that many arguments.
105
102
106 Assumes that token[i + 1] is '('.
103 Assumes that token[i + 1] is '('.
107
104
108 """
105 """
109 nested = 0
106 nested = 0
110 for j in range(i + 2, len(tokens)):
107 for j in range(i + 2, len(tokens)):
111 if _isop(j, ')', ']', '}'):
108 if _isop(j, ')', ']', '}'):
112 # end of call, tuple, subscription or dict / set
109 # end of call, tuple, subscription or dict / set
113 nested -= 1
110 nested -= 1
114 if nested < 0:
111 if nested < 0:
115 return None
112 return None
116 elif n == 0:
113 elif n == 0:
117 # this is the starting position of arg
114 # this is the starting position of arg
118 return j
115 return j
119 elif _isop(j, '(', '[', '{'):
116 elif _isop(j, '(', '[', '{'):
120 nested += 1
117 nested += 1
121 elif _isop(j, ',') and nested == 0:
118 elif _isop(j, ',') and nested == 0:
122 n -= 1
119 n -= 1
123
120
124 return None
121 return None
125
122
126 def _ensureunicode(j):
123 def _ensureunicode(j):
127 """Make sure the token at j is a unicode string
124 """Make sure the token at j is a unicode string
128
125
129 This rewrites a string token to include the unicode literal prefix
126 This rewrites a string token to include the unicode literal prefix
130 so the string transformer won't add the byte prefix.
127 so the string transformer won't add the byte prefix.
131
128
132 Ignores tokens that are not strings. Assumes bounds checking has
129 Ignores tokens that are not strings. Assumes bounds checking has
133 already been done.
130 already been done.
134
131
135 """
132 """
136 st = tokens[j]
133 st = tokens[j]
137 if st.type == token.STRING and st.string.startswith(("'", '"')):
134 if st.type == token.STRING and st.string.startswith(("'", '"')):
138 tokens[j] = st._replace(string='u%s' % st.string)
135 tokens[j] = st._replace(string='u%s' % st.string)
139
136
140 for i, t in enumerate(tokens):
137 for i, t in enumerate(tokens):
141 # Convert most string literals to byte literals. String literals
138 # Convert most string literals to byte literals. String literals
142 # in Python 2 are bytes. String literals in Python 3 are unicode.
139 # in Python 2 are bytes. String literals in Python 3 are unicode.
143 # Most strings in Mercurial are bytes and unicode strings are rare.
140 # Most strings in Mercurial are bytes and unicode strings are rare.
144 # Rather than rewrite all string literals to use ``b''`` to indicate
141 # Rather than rewrite all string literals to use ``b''`` to indicate
145 # byte strings, we apply this token transformer to insert the ``b``
142 # byte strings, we apply this token transformer to insert the ``b``
146 # prefix nearly everywhere.
143 # prefix nearly everywhere.
147 if t.type == token.STRING:
144 if t.type == token.STRING:
148 s = t.string
145 s = t.string
149
146
150 # Preserve docstrings as string literals. This is inconsistent
147 # Preserve docstrings as string literals. This is inconsistent
151 # with regular unprefixed strings. However, the
148 # with regular unprefixed strings. However, the
152 # "from __future__" parsing (which allows a module docstring to
149 # "from __future__" parsing (which allows a module docstring to
153 # exist before it) doesn't properly handle the docstring if it
150 # exist before it) doesn't properly handle the docstring if it
154 # is b''' prefixed, leading to a SyntaxError. We leave all
151 # is b''' prefixed, leading to a SyntaxError. We leave all
155 # docstrings as unprefixed to avoid this. This means Mercurial
152 # docstrings as unprefixed to avoid this. This means Mercurial
156 # components touching docstrings need to handle unicode,
153 # components touching docstrings need to handle unicode,
157 # unfortunately.
154 # unfortunately.
158 if s[0:3] in ("'''", '"""'):
155 if s[0:3] in ("'''", '"""'):
159 yield t
156 yield t
160 continue
157 continue
161
158
162 # If the first character isn't a quote, it is likely a string
159 # If the first character isn't a quote, it is likely a string
163 # prefixing character (such as 'b', 'u', or 'r'. Ignore.
160 # prefixing character (such as 'b', 'u', or 'r'. Ignore.
164 if s[0] not in ("'", '"'):
161 if s[0] not in ("'", '"'):
165 yield t
162 yield t
166 continue
163 continue
167
164
168 # String literal. Prefix to make a b'' string.
165 # String literal. Prefix to make a b'' string.
169 yield t._replace(string='b%s' % t.string)
166 yield t._replace(string='b%s' % t.string)
170 continue
167 continue
171
168
172 # Insert compatibility imports at "from __future__ import" line.
169 # Insert compatibility imports at "from __future__ import" line.
173 # No '\n' should be added to preserve line numbers.
170 # No '\n' should be added to preserve line numbers.
174 if (t.type == token.NAME and t.string == 'import' and
171 if (t.type == token.NAME and t.string == 'import' and
175 all(u.type == token.NAME for u in tokens[i - 2:i]) and
172 all(u.type == token.NAME for u in tokens[i - 2:i]) and
176 [u.string for u in tokens[i - 2:i]] == ['from', '__future__']):
173 [u.string for u in tokens[i - 2:i]] == ['from', '__future__']):
177 futureimpline = True
174 futureimpline = True
178 if t.type == token.NEWLINE and futureimpline:
175 if t.type == token.NEWLINE and futureimpline:
179 futureimpline = False
176 futureimpline = False
180 if fullname == 'mercurial.pycompat':
177 if fullname == 'mercurial.pycompat':
181 yield t
178 yield t
182 continue
179 continue
183 r, c = t.start
180 r, c = t.start
184 l = (b'; from mercurial.pycompat import '
181 l = (b'; from mercurial.pycompat import '
185 b'delattr, getattr, hasattr, setattr, xrange, '
182 b'delattr, getattr, hasattr, setattr, xrange, '
186 b'open, unicode\n')
183 b'open, unicode\n')
187 for u in tokenize.tokenize(io.BytesIO(l).readline):
184 for u in tokenize.tokenize(io.BytesIO(l).readline):
188 if u.type in (tokenize.ENCODING, token.ENDMARKER):
185 if u.type in (tokenize.ENCODING, token.ENDMARKER):
189 continue
186 continue
190 yield u._replace(
187 yield u._replace(
191 start=(r, c + u.start[1]), end=(r, c + u.end[1]))
188 start=(r, c + u.start[1]), end=(r, c + u.end[1]))
192 continue
189 continue
193
190
194 # This looks like a function call.
191 # This looks like a function call.
195 if t.type == token.NAME and _isop(i + 1, '('):
192 if t.type == token.NAME and _isop(i + 1, '('):
196 fn = t.string
193 fn = t.string
197
194
198 # *attr() builtins don't accept byte strings to 2nd argument.
195 # *attr() builtins don't accept byte strings to 2nd argument.
199 if (fn in ('getattr', 'setattr', 'hasattr', 'safehasattr') and
196 if (fn in ('getattr', 'setattr', 'hasattr', 'safehasattr') and
200 not _isop(i - 1, '.')):
197 not _isop(i - 1, '.')):
201 arg1idx = _findargnofcall(1)
198 arg1idx = _findargnofcall(1)
202 if arg1idx is not None:
199 if arg1idx is not None:
203 _ensureunicode(arg1idx)
200 _ensureunicode(arg1idx)
204
201
205 # .encode() and .decode() on str/bytes/unicode don't accept
202 # .encode() and .decode() on str/bytes/unicode don't accept
206 # byte strings on Python 3.
203 # byte strings on Python 3.
207 elif fn in ('encode', 'decode') and _isop(i - 1, '.'):
204 elif fn in ('encode', 'decode') and _isop(i - 1, '.'):
208 for argn in range(2):
205 for argn in range(2):
209 argidx = _findargnofcall(argn)
206 argidx = _findargnofcall(argn)
210 if argidx is not None:
207 if argidx is not None:
211 _ensureunicode(argidx)
208 _ensureunicode(argidx)
212
209
213 # It changes iteritems/values to items/values as they are not
210 # It changes iteritems/values to items/values as they are not
214 # present in Python 3 world.
211 # present in Python 3 world.
215 elif fn in ('iteritems', 'itervalues'):
212 elif fn in ('iteritems', 'itervalues'):
216 yield t._replace(string=fn[4:])
213 yield t._replace(string=fn[4:])
217 continue
214 continue
218
215
219 # Emit unmodified token.
216 # Emit unmodified token.
220 yield t
217 yield t
221
218
222 # Header to add to bytecode files. This MUST be changed when
219 # Header to add to bytecode files. This MUST be changed when
223 # ``replacetoken`` or any mechanism that changes semantics of module
220 # ``replacetoken`` or any mechanism that changes semantics of module
224 # loading is changed. Otherwise cached bytecode may get loaded without
221 # loading is changed. Otherwise cached bytecode may get loaded without
225 # the new transformation mechanisms applied.
222 # the new transformation mechanisms applied.
226 BYTECODEHEADER = b'HG\x00\x0a'
223 BYTECODEHEADER = b'HG\x00\x0a'
227
224
228 class hgloader(importlib.machinery.SourceFileLoader):
225 class hgloader(importlib.machinery.SourceFileLoader):
229 """Custom module loader that transforms source code.
226 """Custom module loader that transforms source code.
230
227
231 When the source code is converted to a code object, we transform
228 When the source code is converted to a code object, we transform
232 certain patterns to be Python 3 compatible. This allows us to write code
229 certain patterns to be Python 3 compatible. This allows us to write code
233 that is natively Python 2 and compatible with Python 3 without
230 that is natively Python 2 and compatible with Python 3 without
234 making the code excessively ugly.
231 making the code excessively ugly.
235
232
236 We do this by transforming the token stream between parse and compile.
233 We do this by transforming the token stream between parse and compile.
237
234
238 Implementing transformations invalidates caching assumptions made
235 Implementing transformations invalidates caching assumptions made
239 by the built-in importer. The built-in importer stores a header on
236 by the built-in importer. The built-in importer stores a header on
240 saved bytecode files indicating the Python/bytecode version. If the
237 saved bytecode files indicating the Python/bytecode version. If the
241 version changes, the cached bytecode is ignored. The Mercurial
238 version changes, the cached bytecode is ignored. The Mercurial
242 transformations could change at any time. This means we need to check
239 transformations could change at any time. This means we need to check
243 that cached bytecode was generated with the current transformation
240 that cached bytecode was generated with the current transformation
244 code or there could be a mismatch between cached bytecode and what
241 code or there could be a mismatch between cached bytecode and what
245 would be generated from this class.
242 would be generated from this class.
246
243
247 We supplement the bytecode caching layer by wrapping ``get_data``
244 We supplement the bytecode caching layer by wrapping ``get_data``
248 and ``set_data``. These functions are called when the
245 and ``set_data``. These functions are called when the
249 ``SourceFileLoader`` retrieves and saves bytecode cache files,
246 ``SourceFileLoader`` retrieves and saves bytecode cache files,
250 respectively. We simply add an additional header on the file. As
247 respectively. We simply add an additional header on the file. As
251 long as the version in this file is changed when semantics change,
248 long as the version in this file is changed when semantics change,
252 cached bytecode should be invalidated when transformations change.
249 cached bytecode should be invalidated when transformations change.
253
250
254 The added header has the form ``HG<VERSION>``. That is a literal
251 The added header has the form ``HG<VERSION>``. That is a literal
255 ``HG`` with 2 binary bytes indicating the transformation version.
252 ``HG`` with 2 binary bytes indicating the transformation version.
256 """
253 """
257 def get_data(self, path):
254 def get_data(self, path):
258 data = super(hgloader, self).get_data(path)
255 data = super(hgloader, self).get_data(path)
259
256
260 if not path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
257 if not path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
261 return data
258 return data
262
259
263 # There should be a header indicating the Mercurial transformation
260 # There should be a header indicating the Mercurial transformation
264 # version. If it doesn't exist or doesn't match the current version,
261 # version. If it doesn't exist or doesn't match the current version,
265 # we raise an OSError because that is what
262 # we raise an OSError because that is what
266 # ``SourceFileLoader.get_code()`` expects when loading bytecode
263 # ``SourceFileLoader.get_code()`` expects when loading bytecode
267 # paths to indicate the cached file is "bad."
264 # paths to indicate the cached file is "bad."
268 if data[0:2] != b'HG':
265 if data[0:2] != b'HG':
269 raise OSError('no hg header')
266 raise OSError('no hg header')
270 if data[0:4] != BYTECODEHEADER:
267 if data[0:4] != BYTECODEHEADER:
271 raise OSError('hg header version mismatch')
268 raise OSError('hg header version mismatch')
272
269
273 return data[4:]
270 return data[4:]
274
271
275 def set_data(self, path, data, *args, **kwargs):
272 def set_data(self, path, data, *args, **kwargs):
276 if path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
273 if path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
277 data = BYTECODEHEADER + data
274 data = BYTECODEHEADER + data
278
275
279 return super(hgloader, self).set_data(path, data, *args, **kwargs)
276 return super(hgloader, self).set_data(path, data, *args, **kwargs)
280
277
281 def source_to_code(self, data, path):
278 def source_to_code(self, data, path):
282 """Perform token transformation before compilation."""
279 """Perform token transformation before compilation."""
283 buf = io.BytesIO(data)
280 buf = io.BytesIO(data)
284 tokens = tokenize.tokenize(buf.readline)
281 tokens = tokenize.tokenize(buf.readline)
285 data = tokenize.untokenize(replacetokens(list(tokens), self.name))
282 data = tokenize.untokenize(replacetokens(list(tokens), self.name))
286 # Python's built-in importer strips frames from exceptions raised
283 # Python's built-in importer strips frames from exceptions raised
287 # for this code. Unfortunately, that mechanism isn't extensible
284 # for this code. Unfortunately, that mechanism isn't extensible
288 # and our frame will be blamed for the import failure. There
285 # and our frame will be blamed for the import failure. There
289 # are extremely hacky ways to do frame stripping. We haven't
286 # are extremely hacky ways to do frame stripping. We haven't
290 # implemented them because they are very ugly.
287 # implemented them because they are very ugly.
291 return super(hgloader, self).source_to_code(data, path)
288 return super(hgloader, self).source_to_code(data, path)
292
289
293 # We automagically register our custom importer as a side-effect of
290 # We automagically register our custom importer as a side-effect of
294 # loading. This is necessary to ensure that any entry points are able
291 # loading. This is necessary to ensure that any entry points are able
295 # to import mercurial.* modules without having to perform this
292 # to import mercurial.* modules without having to perform this
296 # registration themselves.
293 # registration themselves.
297 if not any(isinstance(x, hgpathentryfinder) for x in sys.meta_path):
294 if not any(isinstance(x, hgpathentryfinder) for x in sys.meta_path):
298 # meta_path is used before any implicit finders and before sys.path.
295 # meta_path is used before any implicit finders and before sys.path.
299 sys.meta_path.insert(0, hgpathentryfinder())
296 sys.meta_path.insert(0, hgpathentryfinder())
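For reference (not part of this changeset): a minimal, self-contained sketch of the finder/loader pattern used by hgpathentryfinder and hgloader above. The 'myproject.' prefix, rewritingfinder and rewritingloader are illustrative names, and the token rewrite is a toy subset of replacetokens() that only applies the b'' prefix and the docstring exclusion:

    import importlib.abc
    import importlib.machinery
    import io
    import sys
    import token
    import tokenize

    PREFIX = 'myproject.'  # hypothetical; Mercurial matches 'mercurial.', 'hgext.', 'hgext3rd.'

    class rewritingloader(importlib.machinery.SourceFileLoader):
        """Rewrite the token stream between source reading and compilation."""
        def source_to_code(self, data, path):
            toks = list(tokenize.tokenize(io.BytesIO(data).readline))
            out = []
            for t in toks:
                # prefix plain, non-docstring string literals with b''
                if (t.type == token.STRING and t.string[0] in ("'", '"')
                        and not t.string.startswith(("'''", '"""'))):
                    t = t._replace(string='b' + t.string)
                out.append(t)
            return super().source_to_code(tokenize.untokenize(out), path)

    class rewritingfinder(importlib.abc.MetaPathFinder):
        """Reuse the other finders' specs, but swap in the rewriting loader."""
        def find_spec(self, fullname, path, target=None):
            if not fullname.startswith(PREFIX):
                return None
            spec = None
            for finder in sys.meta_path:
                if finder is self:
                    continue
                spec = finder.find_spec(fullname, path, target=target)
                if spec:
                    break
            if not spec or not spec.origin or not spec.origin.endswith('.py'):
                return spec  # nothing found, or not a plain source file
            spec.loader = rewritingloader(spec.name, spec.origin)
            return spec

    if not any(isinstance(f, rewritingfinder) for f in sys.meta_path):
        sys.meta_path.insert(0, rewritingfinder())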
mercurial/commandserver.py
@@ -1,549 +1,549 @@
1 # commandserver.py - communicate with Mercurial's API over a pipe
1 # commandserver.py - communicate with Mercurial's API over a pipe
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import gc
11 import gc
12 import os
12 import os
13 import random
13 import random
14 import signal
14 import signal
15 import socket
15 import socket
16 import struct
16 import struct
17 import traceback
17 import traceback
18
18
19 from .i18n import _
19 from .i18n import _
20 from .thirdparty import selectors2
20 from . import (
21 from . import (
21 encoding,
22 encoding,
22 error,
23 error,
23 pycompat,
24 pycompat,
24 selectors2,
25 util,
25 util,
26 )
26 )
27
27
28 logfile = None
28 logfile = None
29
29
30 def log(*args):
30 def log(*args):
31 if not logfile:
31 if not logfile:
32 return
32 return
33
33
34 for a in args:
34 for a in args:
35 logfile.write(str(a))
35 logfile.write(str(a))
36
36
37 logfile.flush()
37 logfile.flush()
38
38
39 class channeledoutput(object):
39 class channeledoutput(object):
40 """
40 """
41 Write data to out in the following format:
41 Write data to out in the following format:
42
42
43 data length (unsigned int),
43 data length (unsigned int),
44 data
44 data
45 """
45 """
46 def __init__(self, out, channel):
46 def __init__(self, out, channel):
47 self.out = out
47 self.out = out
48 self.channel = channel
48 self.channel = channel
49
49
50 @property
50 @property
51 def name(self):
51 def name(self):
52 return '<%c-channel>' % self.channel
52 return '<%c-channel>' % self.channel
53
53
54 def write(self, data):
54 def write(self, data):
55 if not data:
55 if not data:
56 return
56 return
57 # single write() to guarantee the same atomicity as the underlying file
57 # single write() to guarantee the same atomicity as the underlying file
58 self.out.write(struct.pack('>cI', self.channel, len(data)) + data)
58 self.out.write(struct.pack('>cI', self.channel, len(data)) + data)
59 self.out.flush()
59 self.out.flush()
60
60
61 def __getattr__(self, attr):
61 def __getattr__(self, attr):
62 if attr in ('isatty', 'fileno', 'tell', 'seek'):
62 if attr in ('isatty', 'fileno', 'tell', 'seek'):
63 raise AttributeError(attr)
63 raise AttributeError(attr)
64 return getattr(self.out, attr)
64 return getattr(self.out, attr)
65
65
66 class channeledinput(object):
66 class channeledinput(object):
67 """
67 """
68 Read data from in_.
68 Read data from in_.
69
69
70 Requests for input are written to out in the following format:
70 Requests for input are written to out in the following format:
71 channel identifier - 'I' for plain input, 'L' line based (1 byte)
71 channel identifier - 'I' for plain input, 'L' line based (1 byte)
72 how many bytes to send at most (unsigned int),
72 how many bytes to send at most (unsigned int),
73
73
74 The client replies with:
74 The client replies with:
75 data length (unsigned int), 0 meaning EOF
75 data length (unsigned int), 0 meaning EOF
76 data
76 data
77 """
77 """
78
78
79 maxchunksize = 4 * 1024
79 maxchunksize = 4 * 1024
80
80
81 def __init__(self, in_, out, channel):
81 def __init__(self, in_, out, channel):
82 self.in_ = in_
82 self.in_ = in_
83 self.out = out
83 self.out = out
84 self.channel = channel
84 self.channel = channel
85
85
86 @property
86 @property
87 def name(self):
87 def name(self):
88 return '<%c-channel>' % self.channel
88 return '<%c-channel>' % self.channel
89
89
90 def read(self, size=-1):
90 def read(self, size=-1):
91 if size < 0:
91 if size < 0:
92 # if we need to consume all the clients input, ask for 4k chunks
92 # if we need to consume all the clients input, ask for 4k chunks
93 # so the pipe doesn't fill up risking a deadlock
93 # so the pipe doesn't fill up risking a deadlock
94 size = self.maxchunksize
94 size = self.maxchunksize
95 s = self._read(size, self.channel)
95 s = self._read(size, self.channel)
96 buf = s
96 buf = s
97 while s:
97 while s:
98 s = self._read(size, self.channel)
98 s = self._read(size, self.channel)
99 buf += s
99 buf += s
100
100
101 return buf
101 return buf
102 else:
102 else:
103 return self._read(size, self.channel)
103 return self._read(size, self.channel)
104
104
105 def _read(self, size, channel):
105 def _read(self, size, channel):
106 if not size:
106 if not size:
107 return ''
107 return ''
108 assert size > 0
108 assert size > 0
109
109
110 # tell the client we need at most size bytes
110 # tell the client we need at most size bytes
111 self.out.write(struct.pack('>cI', channel, size))
111 self.out.write(struct.pack('>cI', channel, size))
112 self.out.flush()
112 self.out.flush()
113
113
114 length = self.in_.read(4)
114 length = self.in_.read(4)
115 length = struct.unpack('>I', length)[0]
115 length = struct.unpack('>I', length)[0]
116 if not length:
116 if not length:
117 return ''
117 return ''
118 else:
118 else:
119 return self.in_.read(length)
119 return self.in_.read(length)
120
120
121 def readline(self, size=-1):
121 def readline(self, size=-1):
122 if size < 0:
122 if size < 0:
123 size = self.maxchunksize
123 size = self.maxchunksize
124 s = self._read(size, 'L')
124 s = self._read(size, 'L')
125 buf = s
125 buf = s
126 # keep asking for more until there's either no more or
126 # keep asking for more until there's either no more or
127 # we got a full line
127 # we got a full line
128 while s and s[-1] != '\n':
128 while s and s[-1] != '\n':
129 s = self._read(size, 'L')
129 s = self._read(size, 'L')
130 buf += s
130 buf += s
131
131
132 return buf
132 return buf
133 else:
133 else:
134 return self._read(size, 'L')
134 return self._read(size, 'L')
135
135
136 def __iter__(self):
136 def __iter__(self):
137 return self
137 return self
138
138
139 def next(self):
139 def next(self):
140 l = self.readline()
140 l = self.readline()
141 if not l:
141 if not l:
142 raise StopIteration
142 raise StopIteration
143 return l
143 return l
144
144
145 def __getattr__(self, attr):
145 def __getattr__(self, attr):
146 if attr in ('isatty', 'fileno', 'tell', 'seek'):
146 if attr in ('isatty', 'fileno', 'tell', 'seek'):
147 raise AttributeError(attr)
147 raise AttributeError(attr)
148 return getattr(self.in_, attr)
148 return getattr(self.in_, attr)
149
149
150 class server(object):
150 class server(object):
151 """
151 """
152 Listens for commands on fin, runs them and writes the output on a channel
152 Listens for commands on fin, runs them and writes the output on a channel
153 based stream to fout.
153 based stream to fout.
154 """
154 """
155 def __init__(self, ui, repo, fin, fout):
155 def __init__(self, ui, repo, fin, fout):
156 self.cwd = pycompat.getcwd()
156 self.cwd = pycompat.getcwd()
157
157
158 # developer config: cmdserver.log
158 # developer config: cmdserver.log
159 logpath = ui.config("cmdserver", "log")
159 logpath = ui.config("cmdserver", "log")
160 if logpath:
160 if logpath:
161 global logfile
161 global logfile
162 if logpath == '-':
162 if logpath == '-':
163 # write log on a special 'd' (debug) channel
163 # write log on a special 'd' (debug) channel
164 logfile = channeledoutput(fout, 'd')
164 logfile = channeledoutput(fout, 'd')
165 else:
165 else:
166 logfile = open(logpath, 'a')
166 logfile = open(logpath, 'a')
167
167
168 if repo:
168 if repo:
169 # the ui here is really the repo ui so take its baseui so we don't
169 # the ui here is really the repo ui so take its baseui so we don't
170 # end up with its local configuration
170 # end up with its local configuration
171 self.ui = repo.baseui
171 self.ui = repo.baseui
172 self.repo = repo
172 self.repo = repo
173 self.repoui = repo.ui
173 self.repoui = repo.ui
174 else:
174 else:
175 self.ui = ui
175 self.ui = ui
176 self.repo = self.repoui = None
176 self.repo = self.repoui = None
177
177
178 self.cerr = channeledoutput(fout, 'e')
178 self.cerr = channeledoutput(fout, 'e')
179 self.cout = channeledoutput(fout, 'o')
179 self.cout = channeledoutput(fout, 'o')
180 self.cin = channeledinput(fin, fout, 'I')
180 self.cin = channeledinput(fin, fout, 'I')
181 self.cresult = channeledoutput(fout, 'r')
181 self.cresult = channeledoutput(fout, 'r')
182
182
183 self.client = fin
183 self.client = fin
184
184
185 def cleanup(self):
185 def cleanup(self):
186 """release and restore resources taken during server session"""
186 """release and restore resources taken during server session"""
187
187
188 def _read(self, size):
188 def _read(self, size):
189 if not size:
189 if not size:
190 return ''
190 return ''
191
191
192 data = self.client.read(size)
192 data = self.client.read(size)
193
193
194 # is the other end closed?
194 # is the other end closed?
195 if not data:
195 if not data:
196 raise EOFError
196 raise EOFError
197
197
198 return data
198 return data
199
199
200 def _readstr(self):
200 def _readstr(self):
201 """read a string from the channel
201 """read a string from the channel
202
202
203 format:
203 format:
204 data length (uint32), data
204 data length (uint32), data
205 """
205 """
206 length = struct.unpack('>I', self._read(4))[0]
206 length = struct.unpack('>I', self._read(4))[0]
207 if not length:
207 if not length:
208 return ''
208 return ''
209 return self._read(length)
209 return self._read(length)
210
210
211 def _readlist(self):
211 def _readlist(self):
212 """read a list of NULL separated strings from the channel"""
212 """read a list of NULL separated strings from the channel"""
213 s = self._readstr()
213 s = self._readstr()
214 if s:
214 if s:
215 return s.split('\0')
215 return s.split('\0')
216 else:
216 else:
217 return []
217 return []
218
218
219 def runcommand(self):
219 def runcommand(self):
220 """ reads a list of \0 terminated arguments, executes
220 """ reads a list of \0 terminated arguments, executes
221 and writes the return code to the result channel """
221 and writes the return code to the result channel """
222 from . import dispatch # avoid cycle
222 from . import dispatch # avoid cycle
223
223
224 args = self._readlist()
224 args = self._readlist()
225
225
226 # copy the uis so changes (e.g. --config or --verbose) don't
226 # copy the uis so changes (e.g. --config or --verbose) don't
227 # persist between requests
227 # persist between requests
228 copiedui = self.ui.copy()
228 copiedui = self.ui.copy()
229 uis = [copiedui]
229 uis = [copiedui]
230 if self.repo:
230 if self.repo:
231 self.repo.baseui = copiedui
231 self.repo.baseui = copiedui
232 # clone ui without using ui.copy because this is protected
232 # clone ui without using ui.copy because this is protected
233 repoui = self.repoui.__class__(self.repoui)
233 repoui = self.repoui.__class__(self.repoui)
234 repoui.copy = copiedui.copy # redo copy protection
234 repoui.copy = copiedui.copy # redo copy protection
235 uis.append(repoui)
235 uis.append(repoui)
236 self.repo.ui = self.repo.dirstate._ui = repoui
236 self.repo.ui = self.repo.dirstate._ui = repoui
237 self.repo.invalidateall()
237 self.repo.invalidateall()
238
238
239 for ui in uis:
239 for ui in uis:
240 ui.resetstate()
240 ui.resetstate()
241 # any kind of interaction must use server channels, but chg may
241 # any kind of interaction must use server channels, but chg may
242 # replace channels by fully functional tty files. so nontty is
242 # replace channels by fully functional tty files. so nontty is
243 # enforced only if cin is a channel.
243 # enforced only if cin is a channel.
244 if not util.safehasattr(self.cin, 'fileno'):
244 if not util.safehasattr(self.cin, 'fileno'):
245 ui.setconfig('ui', 'nontty', 'true', 'commandserver')
245 ui.setconfig('ui', 'nontty', 'true', 'commandserver')
246
246
247 req = dispatch.request(args[:], copiedui, self.repo, self.cin,
247 req = dispatch.request(args[:], copiedui, self.repo, self.cin,
248 self.cout, self.cerr)
248 self.cout, self.cerr)
249
249
250 ret = (dispatch.dispatch(req) or 0) & 255 # might return None
250 ret = (dispatch.dispatch(req) or 0) & 255 # might return None
251
251
252 # restore old cwd
252 # restore old cwd
253 if '--cwd' in args:
253 if '--cwd' in args:
254 os.chdir(self.cwd)
254 os.chdir(self.cwd)
255
255
256 self.cresult.write(struct.pack('>i', int(ret)))
256 self.cresult.write(struct.pack('>i', int(ret)))
257
257
258 def getencoding(self):
258 def getencoding(self):
259 """ writes the current encoding to the result channel """
259 """ writes the current encoding to the result channel """
260 self.cresult.write(encoding.encoding)
260 self.cresult.write(encoding.encoding)
261
261
262 def serveone(self):
262 def serveone(self):
263 cmd = self.client.readline()[:-1]
263 cmd = self.client.readline()[:-1]
264 if cmd:
264 if cmd:
265 handler = self.capabilities.get(cmd)
265 handler = self.capabilities.get(cmd)
266 if handler:
266 if handler:
267 handler(self)
267 handler(self)
268 else:
268 else:
269 # clients are expected to check what commands are supported by
269 # clients are expected to check what commands are supported by
270 # looking at the servers capabilities
270 # looking at the servers capabilities
271 raise error.Abort(_('unknown command %s') % cmd)
271 raise error.Abort(_('unknown command %s') % cmd)
272
272
273 return cmd != ''
273 return cmd != ''
274
274
275 capabilities = {'runcommand': runcommand,
275 capabilities = {'runcommand': runcommand,
276 'getencoding': getencoding}
276 'getencoding': getencoding}
277
277
278 def serve(self):
278 def serve(self):
279 hellomsg = 'capabilities: ' + ' '.join(sorted(self.capabilities))
279 hellomsg = 'capabilities: ' + ' '.join(sorted(self.capabilities))
280 hellomsg += '\n'
280 hellomsg += '\n'
281 hellomsg += 'encoding: ' + encoding.encoding
281 hellomsg += 'encoding: ' + encoding.encoding
282 hellomsg += '\n'
282 hellomsg += '\n'
283 hellomsg += 'pid: %d' % util.getpid()
283 hellomsg += 'pid: %d' % util.getpid()
284 if util.safehasattr(os, 'getpgid'):
284 if util.safehasattr(os, 'getpgid'):
285 hellomsg += '\n'
285 hellomsg += '\n'
286 hellomsg += 'pgid: %d' % os.getpgid(0)
286 hellomsg += 'pgid: %d' % os.getpgid(0)
287
287
288 # write the hello msg in -one- chunk
288 # write the hello msg in -one- chunk
289 self.cout.write(hellomsg)
289 self.cout.write(hellomsg)
290
290
291 try:
291 try:
292 while self.serveone():
292 while self.serveone():
293 pass
293 pass
294 except EOFError:
294 except EOFError:
295 # we'll get here if the client disconnected while we were reading
295 # we'll get here if the client disconnected while we were reading
296 # its request
296 # its request
297 return 1
297 return 1
298
298
299 return 0
299 return 0
300
300
301 def _protectio(ui):
301 def _protectio(ui):
302 """ duplicates streams and redirect original to null if ui uses stdio """
302 """ duplicates streams and redirect original to null if ui uses stdio """
303 ui.flush()
303 ui.flush()
304 newfiles = []
304 newfiles = []
305 nullfd = os.open(os.devnull, os.O_RDWR)
305 nullfd = os.open(os.devnull, os.O_RDWR)
306 for f, sysf, mode in [(ui.fin, util.stdin, pycompat.sysstr('rb')),
306 for f, sysf, mode in [(ui.fin, util.stdin, pycompat.sysstr('rb')),
307 (ui.fout, util.stdout, pycompat.sysstr('wb'))]:
307 (ui.fout, util.stdout, pycompat.sysstr('wb'))]:
308 if f is sysf:
308 if f is sysf:
309 newfd = os.dup(f.fileno())
309 newfd = os.dup(f.fileno())
310 os.dup2(nullfd, f.fileno())
310 os.dup2(nullfd, f.fileno())
311 f = os.fdopen(newfd, mode)
311 f = os.fdopen(newfd, mode)
312 newfiles.append(f)
312 newfiles.append(f)
313 os.close(nullfd)
313 os.close(nullfd)
314 return tuple(newfiles)
314 return tuple(newfiles)
315
315
316 def _restoreio(ui, fin, fout):
316 def _restoreio(ui, fin, fout):
317 """ restores streams from duplicated ones """
317 """ restores streams from duplicated ones """
318 ui.flush()
318 ui.flush()
319 for f, uif in [(fin, ui.fin), (fout, ui.fout)]:
319 for f, uif in [(fin, ui.fin), (fout, ui.fout)]:
320 if f is not uif:
320 if f is not uif:
321 os.dup2(f.fileno(), uif.fileno())
321 os.dup2(f.fileno(), uif.fileno())
322 f.close()
322 f.close()
323
323
324 class pipeservice(object):
324 class pipeservice(object):
325 def __init__(self, ui, repo, opts):
325 def __init__(self, ui, repo, opts):
326 self.ui = ui
326 self.ui = ui
327 self.repo = repo
327 self.repo = repo
328
328
329 def init(self):
329 def init(self):
330 pass
330 pass
331
331
332 def run(self):
332 def run(self):
333 ui = self.ui
333 ui = self.ui
334 # redirect stdio to null device so that broken extensions or in-process
334 # redirect stdio to null device so that broken extensions or in-process
335 # hooks will never cause corruption of channel protocol.
335 # hooks will never cause corruption of channel protocol.
336 fin, fout = _protectio(ui)
336 fin, fout = _protectio(ui)
337 try:
337 try:
338 sv = server(ui, self.repo, fin, fout)
338 sv = server(ui, self.repo, fin, fout)
339 return sv.serve()
339 return sv.serve()
340 finally:
340 finally:
341 sv.cleanup()
341 sv.cleanup()
342 _restoreio(ui, fin, fout)
342 _restoreio(ui, fin, fout)
343
343
344 def _initworkerprocess():
344 def _initworkerprocess():
345 # use a different process group from the master process, in order to:
345 # use a different process group from the master process, in order to:
346 # 1. make the current process group no longer "orphaned" (because the
346 # 1. make the current process group no longer "orphaned" (because the
347 # parent of this process is in a different process group while
347 # parent of this process is in a different process group while
348 # remains in a same session)
348 # remains in a same session)
349 # according to POSIX 2.2.2.52, orphaned process group will ignore
349 # according to POSIX 2.2.2.52, orphaned process group will ignore
350 # terminal-generated stop signals like SIGTSTP (Ctrl+Z), which will
350 # terminal-generated stop signals like SIGTSTP (Ctrl+Z), which will
351 # cause trouble for things like ncurses.
351 # cause trouble for things like ncurses.
352 # 2. the client can use kill(-pgid, sig) to simulate terminal-generated
352 # 2. the client can use kill(-pgid, sig) to simulate terminal-generated
353 # SIGINT (Ctrl+C) and process-exit-generated SIGHUP. our child
353 # SIGINT (Ctrl+C) and process-exit-generated SIGHUP. our child
354 # processes like ssh will be killed properly, without affecting
354 # processes like ssh will be killed properly, without affecting
355 # unrelated processes.
355 # unrelated processes.
356 os.setpgid(0, 0)
356 os.setpgid(0, 0)
357 # change random state otherwise forked request handlers would have a
357 # change random state otherwise forked request handlers would have a
358 # same state inherited from parent.
358 # same state inherited from parent.
359 random.seed()
359 random.seed()
360
360
361 def _serverequest(ui, repo, conn, createcmdserver):
361 def _serverequest(ui, repo, conn, createcmdserver):
362 fin = conn.makefile('rb')
362 fin = conn.makefile('rb')
363 fout = conn.makefile('wb')
363 fout = conn.makefile('wb')
364 sv = None
364 sv = None
365 try:
365 try:
366 sv = createcmdserver(repo, conn, fin, fout)
366 sv = createcmdserver(repo, conn, fin, fout)
367 try:
367 try:
368 sv.serve()
368 sv.serve()
369 # handle exceptions that may be raised by command server. most of
369 # handle exceptions that may be raised by command server. most of
370 # known exceptions are caught by dispatch.
370 # known exceptions are caught by dispatch.
371 except error.Abort as inst:
371 except error.Abort as inst:
372 ui.warn(_('abort: %s\n') % inst)
372 ui.warn(_('abort: %s\n') % inst)
373 except IOError as inst:
373 except IOError as inst:
374 if inst.errno != errno.EPIPE:
374 if inst.errno != errno.EPIPE:
375 raise
375 raise
376 except KeyboardInterrupt:
376 except KeyboardInterrupt:
377 pass
377 pass
378 finally:
378 finally:
379 sv.cleanup()
379 sv.cleanup()
380 except: # re-raises
380 except: # re-raises
381 # also write traceback to error channel. otherwise client cannot
381 # also write traceback to error channel. otherwise client cannot
382 # see it because it is written to server's stderr by default.
382 # see it because it is written to server's stderr by default.
383 if sv:
383 if sv:
384 cerr = sv.cerr
384 cerr = sv.cerr
385 else:
385 else:
386 cerr = channeledoutput(fout, 'e')
386 cerr = channeledoutput(fout, 'e')
387 traceback.print_exc(file=cerr)
387 traceback.print_exc(file=cerr)
388 raise
388 raise
389 finally:
389 finally:
390 fin.close()
390 fin.close()
391 try:
391 try:
392 fout.close() # implicit flush() may cause another EPIPE
392 fout.close() # implicit flush() may cause another EPIPE
393 except IOError as inst:
393 except IOError as inst:
394 if inst.errno != errno.EPIPE:
394 if inst.errno != errno.EPIPE:
395 raise
395 raise
396
396
397 class unixservicehandler(object):
397 class unixservicehandler(object):
398 """Set of pluggable operations for unix-mode services
398 """Set of pluggable operations for unix-mode services
399
399
400 Almost all methods except for createcmdserver() are called in the main
400 Almost all methods except for createcmdserver() are called in the main
401 process. You can't pass mutable resource back from createcmdserver().
401 process. You can't pass mutable resource back from createcmdserver().
402 """
402 """
403
403
404 pollinterval = None
404 pollinterval = None
405
405
406 def __init__(self, ui):
406 def __init__(self, ui):
407 self.ui = ui
407 self.ui = ui
408
408
409 def bindsocket(self, sock, address):
409 def bindsocket(self, sock, address):
410 util.bindunixsocket(sock, address)
410 util.bindunixsocket(sock, address)
411 sock.listen(socket.SOMAXCONN)
411 sock.listen(socket.SOMAXCONN)
412 self.ui.status(_('listening at %s\n') % address)
412 self.ui.status(_('listening at %s\n') % address)
413 self.ui.flush() # avoid buffering of status message
413 self.ui.flush() # avoid buffering of status message
414
414
415 def unlinksocket(self, address):
415 def unlinksocket(self, address):
416 os.unlink(address)
416 os.unlink(address)
417
417
418 def shouldexit(self):
418 def shouldexit(self):
419 """True if server should shut down; checked per pollinterval"""
419 """True if server should shut down; checked per pollinterval"""
420 return False
420 return False
421
421
422 def newconnection(self):
422 def newconnection(self):
423 """Called when main process notices new connection"""
423 """Called when main process notices new connection"""
424
424
425 def createcmdserver(self, repo, conn, fin, fout):
425 def createcmdserver(self, repo, conn, fin, fout):
426 """Create new command server instance; called in the process that
426 """Create new command server instance; called in the process that
427 serves for the current connection"""
427 serves for the current connection"""
428 return server(self.ui, repo, fin, fout)
428 return server(self.ui, repo, fin, fout)
429
429
430 class unixforkingservice(object):
430 class unixforkingservice(object):
431 """
431 """
432 Listens on unix domain socket and forks server per connection
432 Listens on unix domain socket and forks server per connection
433 """
433 """
434
434
435 def __init__(self, ui, repo, opts, handler=None):
435 def __init__(self, ui, repo, opts, handler=None):
436 self.ui = ui
436 self.ui = ui
437 self.repo = repo
437 self.repo = repo
438 self.address = opts['address']
438 self.address = opts['address']
439 if not util.safehasattr(socket, 'AF_UNIX'):
439 if not util.safehasattr(socket, 'AF_UNIX'):
440 raise error.Abort(_('unsupported platform'))
440 raise error.Abort(_('unsupported platform'))
441 if not self.address:
441 if not self.address:
442 raise error.Abort(_('no socket path specified with --address'))
442 raise error.Abort(_('no socket path specified with --address'))
443 self._servicehandler = handler or unixservicehandler(ui)
443 self._servicehandler = handler or unixservicehandler(ui)
444 self._sock = None
444 self._sock = None
445 self._oldsigchldhandler = None
445 self._oldsigchldhandler = None
446 self._workerpids = set() # updated by signal handler; do not iterate
446 self._workerpids = set() # updated by signal handler; do not iterate
447 self._socketunlinked = None
447 self._socketunlinked = None
448
448
449 def init(self):
449 def init(self):
450 self._sock = socket.socket(socket.AF_UNIX)
450 self._sock = socket.socket(socket.AF_UNIX)
451 self._servicehandler.bindsocket(self._sock, self.address)
451 self._servicehandler.bindsocket(self._sock, self.address)
452 o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
452 o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
453 self._oldsigchldhandler = o
453 self._oldsigchldhandler = o
454 self._socketunlinked = False
454 self._socketunlinked = False
455
455
456 def _unlinksocket(self):
456 def _unlinksocket(self):
457 if not self._socketunlinked:
457 if not self._socketunlinked:
458 self._servicehandler.unlinksocket(self.address)
458 self._servicehandler.unlinksocket(self.address)
459 self._socketunlinked = True
459 self._socketunlinked = True
460
460
461 def _cleanup(self):
461 def _cleanup(self):
462 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
462 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
463 self._sock.close()
463 self._sock.close()
464 self._unlinksocket()
464 self._unlinksocket()
465 # don't kill child processes as they have active clients, just wait
465 # don't kill child processes as they have active clients, just wait
466 self._reapworkers(0)
466 self._reapworkers(0)
467
467
468 def run(self):
468 def run(self):
469 try:
469 try:
470 self._mainloop()
470 self._mainloop()
471 finally:
471 finally:
472 self._cleanup()
472 self._cleanup()
473
473
474 def _mainloop(self):
474 def _mainloop(self):
475 exiting = False
475 exiting = False
476 h = self._servicehandler
476 h = self._servicehandler
477 selector = selectors2.DefaultSelector()
477 selector = selectors2.DefaultSelector()
478 selector.register(self._sock, selectors2.EVENT_READ)
478 selector.register(self._sock, selectors2.EVENT_READ)
479 while True:
479 while True:
480 if not exiting and h.shouldexit():
480 if not exiting and h.shouldexit():
481 # clients can no longer connect() to the domain socket, so
481 # clients can no longer connect() to the domain socket, so
482 # we stop queuing new requests.
482 # we stop queuing new requests.
483 # for requests that are queued (connect()-ed, but haven't been
483 # for requests that are queued (connect()-ed, but haven't been
484 # accept()-ed), handle them before exit. otherwise, clients
484 # accept()-ed), handle them before exit. otherwise, clients
485 # waiting for recv() will receive ECONNRESET.
485 # waiting for recv() will receive ECONNRESET.
486 self._unlinksocket()
486 self._unlinksocket()
487 exiting = True
487 exiting = True
488 ready = selector.select(timeout=h.pollinterval)
488 ready = selector.select(timeout=h.pollinterval)
489 if not ready:
489 if not ready:
490 # only exit if we completed all queued requests
490 # only exit if we completed all queued requests
491 if exiting:
491 if exiting:
492 break
492 break
493 continue
493 continue
494 try:
494 try:
495 conn, _addr = self._sock.accept()
495 conn, _addr = self._sock.accept()
496 except socket.error as inst:
496 except socket.error as inst:
497 if inst.args[0] == errno.EINTR:
497 if inst.args[0] == errno.EINTR:
498 continue
498 continue
499 raise
499 raise
500
500
501 pid = os.fork()
501 pid = os.fork()
502 if pid:
502 if pid:
503 try:
503 try:
504 self.ui.debug('forked worker process (pid=%d)\n' % pid)
504 self.ui.debug('forked worker process (pid=%d)\n' % pid)
505 self._workerpids.add(pid)
505 self._workerpids.add(pid)
506 h.newconnection()
506 h.newconnection()
507 finally:
507 finally:
508 conn.close() # release handle in parent process
508 conn.close() # release handle in parent process
509 else:
509 else:
510 try:
510 try:
511 self._runworker(conn)
511 self._runworker(conn)
512 conn.close()
512 conn.close()
513 os._exit(0)
513 os._exit(0)
514 except: # never return, hence no re-raises
514 except: # never return, hence no re-raises
515 try:
515 try:
516 self.ui.traceback(force=True)
516 self.ui.traceback(force=True)
517 finally:
517 finally:
518 os._exit(255)
518 os._exit(255)
519 selector.close()
519 selector.close()
520
520
521 def _sigchldhandler(self, signal, frame):
521 def _sigchldhandler(self, signal, frame):
522 self._reapworkers(os.WNOHANG)
522 self._reapworkers(os.WNOHANG)
523
523
524 def _reapworkers(self, options):
524 def _reapworkers(self, options):
525 while self._workerpids:
525 while self._workerpids:
526 try:
526 try:
527 pid, _status = os.waitpid(-1, options)
527 pid, _status = os.waitpid(-1, options)
528 except OSError as inst:
528 except OSError as inst:
529 if inst.errno == errno.EINTR:
529 if inst.errno == errno.EINTR:
530 continue
530 continue
531 if inst.errno != errno.ECHILD:
531 if inst.errno != errno.ECHILD:
532 raise
532 raise
533 # no child processes at all (reaped by other waitpid()?)
533 # no child processes at all (reaped by other waitpid()?)
534 self._workerpids.clear()
534 self._workerpids.clear()
535 return
535 return
536 if pid == 0:
536 if pid == 0:
537 # no waitable child processes
537 # no waitable child processes
538 return
538 return
539 self.ui.debug('worker process exited (pid=%d)\n' % pid)
539 self.ui.debug('worker process exited (pid=%d)\n' % pid)
540 self._workerpids.discard(pid)
540 self._workerpids.discard(pid)
541
541
542 def _runworker(self, conn):
542 def _runworker(self, conn):
543 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
543 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
544 _initworkerprocess()
544 _initworkerprocess()
545 h = self._servicehandler
545 h = self._servicehandler
546 try:
546 try:
547 _serverequest(self.ui, self.repo, conn, h.createcmdserver)
547 _serverequest(self.ui, self.repo, conn, h.createcmdserver)
548 finally:
548 finally:
549 gc.collect() # trigger __del__ since worker process uses os._exit
549 gc.collect() # trigger __del__ since worker process uses os._exit
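For reference (not part of this changeset): channeledoutput.write() above frames every block as a one-byte channel identifier plus a big-endian uint32 payload length followed by the payload, written in a single call; channeledinput requests input with the same '>cI' header. A minimal sketch of producing and parsing one such frame (writeframe/readframe are illustrative helpers, not Mercurial APIs):

    import io
    import struct

    def writeframe(out, channel, payload):
        # single write() for the whole frame, as channeledoutput.write() does
        out.write(struct.pack('>cI', channel, len(payload)) + payload)

    def readframe(src):
        channel, length = struct.unpack('>cI', src.read(5))
        return channel, src.read(length)

    buf = io.BytesIO()
    writeframe(buf, b'o', b'capabilities: getencoding runcommand\n')
    buf.seek(0)
    print(readframe(buf))  # (b'o', b'capabilities: getencoding runcommand\n')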
mercurial/thirdparty/selectors2.py (renamed from mercurial/selectors2.py)
@@ -1,745 +1,743 @@
1 """ Back-ported, durable, and portable selectors """
1 """ Back-ported, durable, and portable selectors """
2
2
3 # MIT License
3 # MIT License
4 #
4 #
5 # Copyright (c) 2017 Seth Michael Larson
5 # Copyright (c) 2017 Seth Michael Larson
6 #
6 #
7 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # Permission is hereby granted, free of charge, to any person obtaining a copy
8 # of this software and associated documentation files (the "Software"), to deal
8 # of this software and associated documentation files (the "Software"), to deal
9 # in the Software without restriction, including without limitation the rights
9 # in the Software without restriction, including without limitation the rights
10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 # copies of the Software, and to permit persons to whom the Software is
11 # copies of the Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
12 # furnished to do so, subject to the following conditions:
13 #
13 #
14 # The above copyright notice and this permission notice shall be included in all
14 # The above copyright notice and this permission notice shall be included in all
15 # copies or substantial portions of the Software.
15 # copies or substantial portions of the Software.
16 #
16 #
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 # SOFTWARE.
23 # SOFTWARE.
24
24
25 # no-check-code
26
27 from __future__ import absolute_import
25 from __future__ import absolute_import
28
26
29 import collections
27 import collections
30 import errno
28 import errno
31 import math
29 import math
32 import select
30 import select
33 import socket
31 import socket
34 import sys
32 import sys
35 import time
33 import time
36
34
37 from . import pycompat
35 from .. import pycompat
38
36
39 namedtuple = collections.namedtuple
37 namedtuple = collections.namedtuple
40 Mapping = collections.Mapping
38 Mapping = collections.Mapping
41
39
42 try:
40 try:
43 monotonic = time.monotonic
41 monotonic = time.monotonic
44 except AttributeError:
42 except AttributeError:
45 monotonic = time.time
43 monotonic = time.time
46
44
47 __author__ = 'Seth Michael Larson'
45 __author__ = 'Seth Michael Larson'
48 __email__ = 'sethmichaellarson@protonmail.com'
46 __email__ = 'sethmichaellarson@protonmail.com'
49 __version__ = '2.0.0'
47 __version__ = '2.0.0'
50 __license__ = 'MIT'
48 __license__ = 'MIT'
51 __url__ = 'https://www.github.com/SethMichaelLarson/selectors2'
49 __url__ = 'https://www.github.com/SethMichaelLarson/selectors2'
52
50
53 __all__ = ['EVENT_READ',
51 __all__ = ['EVENT_READ',
54 'EVENT_WRITE',
52 'EVENT_WRITE',
55 'SelectorKey',
53 'SelectorKey',
56 'DefaultSelector',
54 'DefaultSelector',
57 'BaseSelector']
55 'BaseSelector']
58
56
59 EVENT_READ = (1 << 0)
57 EVENT_READ = (1 << 0)
60 EVENT_WRITE = (1 << 1)
58 EVENT_WRITE = (1 << 1)
61 _DEFAULT_SELECTOR = None
59 _DEFAULT_SELECTOR = None
62 _SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
60 _SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
63 _ERROR_TYPES = (OSError, IOError, socket.error)
61 _ERROR_TYPES = (OSError, IOError, socket.error)
64
62
65
63
66 SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
64 SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
67
65
68
66
69 class _SelectorMapping(Mapping):
67 class _SelectorMapping(Mapping):
70 """ Mapping of file objects to selector keys """
68 """ Mapping of file objects to selector keys """
71
69
72 def __init__(self, selector):
70 def __init__(self, selector):
73 self._selector = selector
71 self._selector = selector
74
72
75 def __len__(self):
73 def __len__(self):
76 return len(self._selector._fd_to_key)
74 return len(self._selector._fd_to_key)
77
75
78 def __getitem__(self, fileobj):
76 def __getitem__(self, fileobj):
79 try:
77 try:
80 fd = self._selector._fileobj_lookup(fileobj)
78 fd = self._selector._fileobj_lookup(fileobj)
81 return self._selector._fd_to_key[fd]
79 return self._selector._fd_to_key[fd]
82 except KeyError:
80 except KeyError:
83 raise KeyError("{0!r} is not registered.".format(fileobj))
81 raise KeyError("{0!r} is not registered.".format(fileobj))
84
82
85 def __iter__(self):
83 def __iter__(self):
86 return iter(self._selector._fd_to_key)
84 return iter(self._selector._fd_to_key)
87
85
88
86
89 def _fileobj_to_fd(fileobj):
87 def _fileobj_to_fd(fileobj):
90 """ Return a file descriptor from a file object. If
88 """ Return a file descriptor from a file object. If
91 given an integer will simply return that integer back. """
89 given an integer will simply return that integer back. """
92 if isinstance(fileobj, int):
90 if isinstance(fileobj, int):
93 fd = fileobj
91 fd = fileobj
94 else:
92 else:
95 try:
93 try:
96 fd = int(fileobj.fileno())
94 fd = int(fileobj.fileno())
97 except (AttributeError, TypeError, ValueError):
95 except (AttributeError, TypeError, ValueError):
98 raise ValueError("Invalid file object: {0!r}".format(fileobj))
96 raise ValueError("Invalid file object: {0!r}".format(fileobj))
99 if fd < 0:
97 if fd < 0:
100 raise ValueError("Invalid file descriptor: {0}".format(fd))
98 raise ValueError("Invalid file descriptor: {0}".format(fd))
101 return fd
99 return fd
102
100
103
101
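For reviewers, a quick illustration of the _fileobj_to_fd() contract above may be useful: integers pass through unchanged, other objects must expose a working fileno(), and anything else is rejected with ValueError. This is a standalone sketch, not part of the module; the import path assumes the module's new home under mercurial/thirdparty/, per this change.

import os

from mercurial.thirdparty.selectors2 import _fileobj_to_fd

r, w = os.pipe()
assert _fileobj_to_fd(r) == r        # plain integers pass through unchanged
f = os.fdopen(w, 'wb')
assert _fileobj_to_fd(f) == w        # objects are asked for their fileno()
try:
    _fileobj_to_fd(object())         # no fileno() at all -> ValueError
except ValueError:
    pass
f.close()
os.close(r)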
104 class BaseSelector(object):
102 class BaseSelector(object):
105 """ Abstract Selector class
103 """ Abstract Selector class
106
104
107 A selector supports registering file objects to be monitored
105 A selector supports registering file objects to be monitored
108 for specific I/O events.
106 for specific I/O events.
109
107
110 A file object is a file descriptor or any object with a
108 A file object is a file descriptor or any object with a
111 `fileno()` method. An arbitrary object can be attached to the
109 `fileno()` method. An arbitrary object can be attached to the
112 file object which can be used for example to store context info,
110 file object which can be used for example to store context info,
113 a callback, etc.
111 a callback, etc.
114
112
115 A selector can use various implementations (select(), poll(), epoll(),
113 A selector can use various implementations (select(), poll(), epoll(),
116 and kqueue()) depending on the platform. The 'DefaultSelector' class uses
114 and kqueue()) depending on the platform. The 'DefaultSelector' class uses
117 the most efficient implementation for the current platform.
115 the most efficient implementation for the current platform.
118 """
116 """
119 def __init__(self):
117 def __init__(self):
120 # Maps file descriptors to keys.
118 # Maps file descriptors to keys.
121 self._fd_to_key = {}
119 self._fd_to_key = {}
122
120
123 # Read-only mapping returned by get_map()
121 # Read-only mapping returned by get_map()
124 self._map = _SelectorMapping(self)
122 self._map = _SelectorMapping(self)
125
123
126 def _fileobj_lookup(self, fileobj):
124 def _fileobj_lookup(self, fileobj):
127 """ Return a file descriptor from a file object.
125 """ Return a file descriptor from a file object.
128 This wraps _fileobj_to_fd() to do an exhaustive
126 This wraps _fileobj_to_fd() to do an exhaustive
129 search in case the object is invalid but we still
127 search in case the object is invalid but we still
130 have it in our map. Used by unregister() so we can
128 have it in our map. Used by unregister() so we can
131 unregister an object that was previously registered
129 unregister an object that was previously registered
132 even if it is closed. It is also used by _SelectorMapping.
130 even if it is closed. It is also used by _SelectorMapping.
133 """
131 """
134 try:
132 try:
135 return _fileobj_to_fd(fileobj)
133 return _fileobj_to_fd(fileobj)
136 except ValueError:
134 except ValueError:
137
135
138 # Search through all our mapped keys.
136 # Search through all our mapped keys.
139 for key in self._fd_to_key.values():
137 for key in self._fd_to_key.values():
140 if key.fileobj is fileobj:
138 if key.fileobj is fileobj:
141 return key.fd
139 return key.fd
142
140
143 # Raise ValueError after all.
141 # Raise ValueError after all.
144 raise
142 raise
145
143
146 def register(self, fileobj, events, data=None):
144 def register(self, fileobj, events, data=None):
147 """ Register a file object for a set of events to monitor. """
145 """ Register a file object for a set of events to monitor. """
148 if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
146 if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
149 raise ValueError("Invalid events: {0!r}".format(events))
147 raise ValueError("Invalid events: {0!r}".format(events))
150
148
151 key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
149 key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
152
150
153 if key.fd in self._fd_to_key:
151 if key.fd in self._fd_to_key:
154 raise KeyError("{0!r} (FD {1}) is already registered"
152 raise KeyError("{0!r} (FD {1}) is already registered"
155 .format(fileobj, key.fd))
153 .format(fileobj, key.fd))
156
154
157 self._fd_to_key[key.fd] = key
155 self._fd_to_key[key.fd] = key
158 return key
156 return key
159
157
160 def unregister(self, fileobj):
158 def unregister(self, fileobj):
161 """ Unregister a file object from being monitored. """
159 """ Unregister a file object from being monitored. """
162 try:
160 try:
163 key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
161 key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
164 except KeyError:
162 except KeyError:
165 raise KeyError("{0!r} is not registered".format(fileobj))
163 raise KeyError("{0!r} is not registered".format(fileobj))
166
164
167 # Getting the fileno of a closed socket on Windows errors with EBADF.
165 # Getting the fileno of a closed socket on Windows errors with EBADF.
168 except socket.error as err:
166 except socket.error as err:
169 if err.errno != errno.EBADF:
167 if err.errno != errno.EBADF:
170 raise
168 raise
171 else:
169 else:
172 for key in self._fd_to_key.values():
170 for key in self._fd_to_key.values():
173 if key.fileobj is fileobj:
171 if key.fileobj is fileobj:
174 self._fd_to_key.pop(key.fd)
172 self._fd_to_key.pop(key.fd)
175 break
173 break
176 else:
174 else:
177 raise KeyError("{0!r} is not registered".format(fileobj))
175 raise KeyError("{0!r} is not registered".format(fileobj))
178 return key
176 return key
179
177
180 def modify(self, fileobj, events, data=None):
178 def modify(self, fileobj, events, data=None):
181 """ Change a registered file object monitored events and data. """
179 """ Change a registered file object monitored events and data. """
182 # NOTE: Some subclasses optimize this operation even further.
180 # NOTE: Some subclasses optimize this operation even further.
183 try:
181 try:
184 key = self._fd_to_key[self._fileobj_lookup(fileobj)]
182 key = self._fd_to_key[self._fileobj_lookup(fileobj)]
185 except KeyError:
183 except KeyError:
186 raise KeyError("{0!r} is not registered".format(fileobj))
184 raise KeyError("{0!r} is not registered".format(fileobj))
187
185
188 if events != key.events:
186 if events != key.events:
189 self.unregister(fileobj)
187 self.unregister(fileobj)
190 key = self.register(fileobj, events, data)
188 key = self.register(fileobj, events, data)
191
189
192 elif data != key.data:
190 elif data != key.data:
193 # Use a shortcut to update the data.
191 # Use a shortcut to update the data.
194 key = key._replace(data=data)
192 key = key._replace(data=data)
195 self._fd_to_key[key.fd] = key
193 self._fd_to_key[key.fd] = key
196
194
197 return key
195 return key
198
196
199 def select(self, timeout=None):
197 def select(self, timeout=None):
200 """ Perform the actual selection until some monitored file objects
198 """ Perform the actual selection until some monitored file objects
201 are ready or the timeout expires. """
199 are ready or the timeout expires. """
202 raise NotImplementedError()
200 raise NotImplementedError()
203
201
204 def close(self):
202 def close(self):
205 """ Close the selector. This must be called to ensure that all
203 """ Close the selector. This must be called to ensure that all
206 underlying resources are freed. """
204 underlying resources are freed. """
207 self._fd_to_key.clear()
205 self._fd_to_key.clear()
208 self._map = None
206 self._map = None
209
207
210 def get_key(self, fileobj):
208 def get_key(self, fileobj):
211 """ Return the key associated with a registered file object. """
209 """ Return the key associated with a registered file object. """
212 mapping = self.get_map()
210 mapping = self.get_map()
213 if mapping is None:
211 if mapping is None:
214 raise RuntimeError("Selector is closed")
212 raise RuntimeError("Selector is closed")
215 try:
213 try:
216 return mapping[fileobj]
214 return mapping[fileobj]
217 except KeyError:
215 except KeyError:
218 raise KeyError("{0!r} is not registered".format(fileobj))
216 raise KeyError("{0!r} is not registered".format(fileobj))
219
217
220 def get_map(self):
218 def get_map(self):
221 """ Return a mapping of file objects to selector keys """
219 """ Return a mapping of file objects to selector keys """
222 return self._map
220 return self._map
223
221
224 def _key_from_fd(self, fd):
222 def _key_from_fd(self, fd):
225 """ Return the key associated to a given file descriptor
223 """ Return the key associated to a given file descriptor
226 Return None if it is not found. """
224 Return None if it is not found. """
227 try:
225 try:
228 return self._fd_to_key[fd]
226 return self._fd_to_key[fd]
229 except KeyError:
227 except KeyError:
230 return None
228 return None
231
229
232 def __enter__(self):
230 def __enter__(self):
233 return self
231 return self
234
232
235 def __exit__(self, *_):
233 def __exit__(self, *_):
236 self.close()
234 self.close()
237
235
238
236
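The docstring above describes the register/select workflow abstractly, so a minimal end-to-end sketch may help review. Assumptions: a POSIX platform (pipe fds are not selectable on Windows), DefaultSelector as defined at the end of this module, the mercurial.thirdparty import path introduced by this change, and an arbitrary 'reader' label as the attached data.

import os

from mercurial.thirdparty.selectors2 import (
    DefaultSelector,
    EVENT_READ,
    EVENT_WRITE,
)

r, w = os.pipe()                    # raw fds are accepted as "file objects"
sel = DefaultSelector()

key = sel.register(r, EVENT_READ, data='reader')  # returns a SelectorKey
assert (key.fd, key.events, key.data) == (r, EVENT_READ, 'reader')

sel.register(w, EVENT_WRITE)
sel.modify(r, EVENT_READ, data='renamed')         # update the attached data
assert sel.get_key(r).data == 'renamed'
assert r in sel.get_map()                         # read-only view of registrations

os.write(w, b'x')
for k, events in sel.select(timeout=1.0):
    if events & EVENT_READ:
        assert os.read(k.fd, 1) == b'x'

sel.unregister(r)
sel.unregister(w)
sel.close()
os.close(r)
os.close(w)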
239 # Almost all platforms have select.select()
237 # Almost all platforms have select.select()
240 if hasattr(select, "select"):
238 if hasattr(select, "select"):
241 class SelectSelector(BaseSelector):
239 class SelectSelector(BaseSelector):
242 """ Select-based selector. """
240 """ Select-based selector. """
243 def __init__(self):
241 def __init__(self):
244 super(SelectSelector, self).__init__()
242 super(SelectSelector, self).__init__()
245 self._readers = set()
243 self._readers = set()
246 self._writers = set()
244 self._writers = set()
247
245
248 def register(self, fileobj, events, data=None):
246 def register(self, fileobj, events, data=None):
249 key = super(SelectSelector, self).register(fileobj, events, data)
247 key = super(SelectSelector, self).register(fileobj, events, data)
250 if events & EVENT_READ:
248 if events & EVENT_READ:
251 self._readers.add(key.fd)
249 self._readers.add(key.fd)
252 if events & EVENT_WRITE:
250 if events & EVENT_WRITE:
253 self._writers.add(key.fd)
251 self._writers.add(key.fd)
254 return key
252 return key
255
253
256 def unregister(self, fileobj):
254 def unregister(self, fileobj):
257 key = super(SelectSelector, self).unregister(fileobj)
255 key = super(SelectSelector, self).unregister(fileobj)
258 self._readers.discard(key.fd)
256 self._readers.discard(key.fd)
259 self._writers.discard(key.fd)
257 self._writers.discard(key.fd)
260 return key
258 return key
261
259
262 def select(self, timeout=None):
260 def select(self, timeout=None):
263 # Selecting on empty lists on Windows errors out.
261 # Selecting on empty lists on Windows errors out.
264 if not len(self._readers) and not len(self._writers):
262 if not len(self._readers) and not len(self._writers):
265 return []
263 return []
266
264
267 timeout = None if timeout is None else max(timeout, 0.0)
265 timeout = None if timeout is None else max(timeout, 0.0)
268 ready = []
266 ready = []
269 r, w, _ = _syscall_wrapper(self._wrap_select, True, self._readers,
267 r, w, _ = _syscall_wrapper(self._wrap_select, True, self._readers,
270 self._writers, timeout)
268 self._writers, timeout)
271 r = set(r)
269 r = set(r)
272 w = set(w)
270 w = set(w)
273 for fd in r | w:
271 for fd in r | w:
274 events = 0
272 events = 0
275 if fd in r:
273 if fd in r:
276 events |= EVENT_READ
274 events |= EVENT_READ
277 if fd in w:
275 if fd in w:
278 events |= EVENT_WRITE
276 events |= EVENT_WRITE
279
277
280 key = self._key_from_fd(fd)
278 key = self._key_from_fd(fd)
281 if key:
279 if key:
282 ready.append((key, events & key.events))
280 ready.append((key, events & key.events))
283 return ready
281 return ready
284
282
285 def _wrap_select(self, r, w, timeout=None):
283 def _wrap_select(self, r, w, timeout=None):
286 """ Wrapper for select.select because timeout is a positional arg """
284 """ Wrapper for select.select because timeout is a positional arg """
287 return select.select(r, w, [], timeout)
285 return select.select(r, w, [], timeout)
288
286
289 __all__.append('SelectSelector')
287 __all__.append('SelectSelector')
290
288
291 # Jython has a different implementation of .fileno() for socket objects.
289 # Jython has a different implementation of .fileno() for socket objects.
292 if pycompat.isjython:
290 if pycompat.isjython:
293 class _JythonSelectorMapping(object):
291 class _JythonSelectorMapping(object):
294 """ This is an implementation of _SelectorMapping that is built
292 """ This is an implementation of _SelectorMapping that is built
295 for use specifically with Jython, which does not provide a hashable
293 for use specifically with Jython, which does not provide a hashable
296 value from socket.socket.fileno(). """
294 value from socket.socket.fileno(). """
297
295
298 def __init__(self, selector):
296 def __init__(self, selector):
299 assert isinstance(selector, JythonSelectSelector)
297 assert isinstance(selector, JythonSelectSelector)
300 self._selector = selector
298 self._selector = selector
301
299
302 def __len__(self):
300 def __len__(self):
303 return len(self._selector._sockets)
301 return len(self._selector._sockets)
304
302
305 def __getitem__(self, fileobj):
303 def __getitem__(self, fileobj):
306 for sock, key in self._selector._sockets:
304 for sock, key in self._selector._sockets:
307 if sock is fileobj:
305 if sock is fileobj:
308 return key
306 return key
309 else:
307 else:
310 raise KeyError("{0!r} is not registered.".format(fileobj))
308 raise KeyError("{0!r} is not registered.".format(fileobj))
311
309
312 class JythonSelectSelector(SelectSelector):
310 class JythonSelectSelector(SelectSelector):
313 """ This is an implementation of SelectSelector that is for Jython
311 """ This is an implementation of SelectSelector that is for Jython
314 which works around that Jython's socket.socket.fileno() does not
312 which works around that Jython's socket.socket.fileno() does not
315 return an integer fd value. All SelectorKey.fd will be equal to -1
313 return an integer fd value. All SelectorKey.fd will be equal to -1
316 and should not be used. This instead uses object id to compare fileobj
314 and should not be used. This instead uses object id to compare fileobj
317 and will only use select.select as it's the only selector that allows
315 and will only use select.select as it's the only selector that allows
318 directly passing in socket objects rather than registering fds.
316 directly passing in socket objects rather than registering fds.
319 See: http://bugs.jython.org/issue1678
317 See: http://bugs.jython.org/issue1678
320 https://wiki.python.org/jython/NewSocketModule#socket.fileno.28.29_does_not_return_an_integer
318 https://wiki.python.org/jython/NewSocketModule#socket.fileno.28.29_does_not_return_an_integer
321 """
319 """
322
320
323 def __init__(self):
321 def __init__(self):
324 super(JythonSelectSelector, self).__init__()
322 super(JythonSelectSelector, self).__init__()
325
323
326 self._sockets = [] # Uses a list of tuples instead of dictionary.
324 self._sockets = [] # Uses a list of tuples instead of dictionary.
327 self._map = _JythonSelectorMapping(self)
325 self._map = _JythonSelectorMapping(self)
328 self._readers = []
326 self._readers = []
329 self._writers = []
327 self._writers = []
330
328
331 # Jython has a select.cpython_compatible_select function in older versions.
329 # Jython has a select.cpython_compatible_select function in older versions.
332 self._select_func = getattr(select, 'cpython_compatible_select', select.select)
330 self._select_func = getattr(select, 'cpython_compatible_select', select.select)
333
331
334 def register(self, fileobj, events, data=None):
332 def register(self, fileobj, events, data=None):
335 for sock, _ in self._sockets:
333 for sock, _ in self._sockets:
336 if sock is fileobj:
334 if sock is fileobj:
337 raise KeyError("{0!r} is already registered"
335 raise KeyError("{0!r} is already registered"
338 .format(fileobj, sock))
336 .format(fileobj, sock))
339
337
340 key = SelectorKey(fileobj, -1, events, data)
338 key = SelectorKey(fileobj, -1, events, data)
341 self._sockets.append((fileobj, key))
339 self._sockets.append((fileobj, key))
342
340
343 if events & EVENT_READ:
341 if events & EVENT_READ:
344 self._readers.append(fileobj)
342 self._readers.append(fileobj)
345 if events & EVENT_WRITE:
343 if events & EVENT_WRITE:
346 self._writers.append(fileobj)
344 self._writers.append(fileobj)
347 return key
345 return key
348
346
349 def unregister(self, fileobj):
347 def unregister(self, fileobj):
350 for i, (sock, key) in enumerate(self._sockets):
348 for i, (sock, key) in enumerate(self._sockets):
351 if sock is fileobj:
349 if sock is fileobj:
352 break
350 break
353 else:
351 else:
354 raise KeyError("{0!r} is not registered.".format(fileobj))
352 raise KeyError("{0!r} is not registered.".format(fileobj))
355
353
356 if key.events & EVENT_READ:
354 if key.events & EVENT_READ:
357 self._readers.remove(fileobj)
355 self._readers.remove(fileobj)
358 if key.events & EVENT_WRITE:
356 if key.events & EVENT_WRITE:
359 self._writers.remove(fileobj)
357 self._writers.remove(fileobj)
360
358
361 del self._sockets[i]
359 del self._sockets[i]
362 return key
360 return key
363
361
364 def _wrap_select(self, r, w, timeout=None):
362 def _wrap_select(self, r, w, timeout=None):
365 """ Wrapper for select.select because timeout is a positional arg """
363 """ Wrapper for select.select because timeout is a positional arg """
366 return self._select_func(r, w, [], timeout)
364 return self._select_func(r, w, [], timeout)
367
365
368 __all__.append('JythonSelectSelector')
366 __all__.append('JythonSelectSelector')
369 SelectSelector = JythonSelectSelector # Override so the wrong selector isn't used.
367 SelectSelector = JythonSelectSelector # Override so the wrong selector isn't used.
370
368
371
369
372 if hasattr(select, "poll"):
370 if hasattr(select, "poll"):
373 class PollSelector(BaseSelector):
371 class PollSelector(BaseSelector):
374 """ Poll-based selector """
372 """ Poll-based selector """
375 def __init__(self):
373 def __init__(self):
376 super(PollSelector, self).__init__()
374 super(PollSelector, self).__init__()
377 self._poll = select.poll()
375 self._poll = select.poll()
378
376
379 def register(self, fileobj, events, data=None):
377 def register(self, fileobj, events, data=None):
380 key = super(PollSelector, self).register(fileobj, events, data)
378 key = super(PollSelector, self).register(fileobj, events, data)
381 event_mask = 0
379 event_mask = 0
382 if events & EVENT_READ:
380 if events & EVENT_READ:
383 event_mask |= select.POLLIN
381 event_mask |= select.POLLIN
384 if events & EVENT_WRITE:
382 if events & EVENT_WRITE:
385 event_mask |= select.POLLOUT
383 event_mask |= select.POLLOUT
386 self._poll.register(key.fd, event_mask)
384 self._poll.register(key.fd, event_mask)
387 return key
385 return key
388
386
389 def unregister(self, fileobj):
387 def unregister(self, fileobj):
390 key = super(PollSelector, self).unregister(fileobj)
388 key = super(PollSelector, self).unregister(fileobj)
391 self._poll.unregister(key.fd)
389 self._poll.unregister(key.fd)
392 return key
390 return key
393
391
394 def _wrap_poll(self, timeout=None):
392 def _wrap_poll(self, timeout=None):
395 """ Wrapper function for select.poll.poll() so that
393 """ Wrapper function for select.poll.poll() so that
396 _syscall_wrapper can work with only seconds. """
394 _syscall_wrapper can work with only seconds. """
397 if timeout is not None:
395 if timeout is not None:
398 if timeout <= 0:
396 if timeout <= 0:
399 timeout = 0
397 timeout = 0
400 else:
398 else:
401 # select.poll.poll() has a resolution of 1 millisecond,
399 # select.poll.poll() has a resolution of 1 millisecond,
402 # round away from zero to wait *at least* timeout seconds.
400 # round away from zero to wait *at least* timeout seconds.
403 timeout = math.ceil(timeout * 1000)
401 timeout = math.ceil(timeout * 1000)
404
402
405 result = self._poll.poll(timeout)
403 result = self._poll.poll(timeout)
406 return result
404 return result
407
405
408 def select(self, timeout=None):
406 def select(self, timeout=None):
409 ready = []
407 ready = []
410 fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
408 fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
411 for fd, event_mask in fd_events:
409 for fd, event_mask in fd_events:
412 events = 0
410 events = 0
413 if event_mask & ~select.POLLIN:
411 if event_mask & ~select.POLLIN:
414 events |= EVENT_WRITE
412 events |= EVENT_WRITE
415 if event_mask & ~select.POLLOUT:
413 if event_mask & ~select.POLLOUT:
416 events |= EVENT_READ
414 events |= EVENT_READ
417
415
418 key = self._key_from_fd(fd)
416 key = self._key_from_fd(fd)
419 if key:
417 if key:
420 ready.append((key, events & key.events))
418 ready.append((key, events & key.events))
421
419
422 return ready
420 return ready
423
421
424 __all__.append('PollSelector')
422 __all__.append('PollSelector')
425
423
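A small worked example of the rounding done in _wrap_poll() above (illustrative only, not part of the module): poll() takes whole milliseconds, so fractional timeouts are rounded away from zero to guarantee the call waits at least as long as requested.

import math

assert math.ceil(0.0004 * 1000) == 1    # 0.4 ms rounds up to 1 ms, never down to 0
assert math.ceil(0.0101 * 1000) == 11   # 10.1 ms becomes 11 ms
# Non-positive timeouts never reach this path; they are clamped to 0 beforehand.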
426 if hasattr(select, "epoll"):
424 if hasattr(select, "epoll"):
427 class EpollSelector(BaseSelector):
425 class EpollSelector(BaseSelector):
428 """ Epoll-based selector """
426 """ Epoll-based selector """
429 def __init__(self):
427 def __init__(self):
430 super(EpollSelector, self).__init__()
428 super(EpollSelector, self).__init__()
431 self._epoll = select.epoll()
429 self._epoll = select.epoll()
432
430
433 def fileno(self):
431 def fileno(self):
434 return self._epoll.fileno()
432 return self._epoll.fileno()
435
433
436 def register(self, fileobj, events, data=None):
434 def register(self, fileobj, events, data=None):
437 key = super(EpollSelector, self).register(fileobj, events, data)
435 key = super(EpollSelector, self).register(fileobj, events, data)
438 events_mask = 0
436 events_mask = 0
439 if events & EVENT_READ:
437 if events & EVENT_READ:
440 events_mask |= select.EPOLLIN
438 events_mask |= select.EPOLLIN
441 if events & EVENT_WRITE:
439 if events & EVENT_WRITE:
442 events_mask |= select.EPOLLOUT
440 events_mask |= select.EPOLLOUT
443 _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
441 _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
444 return key
442 return key
445
443
446 def unregister(self, fileobj):
444 def unregister(self, fileobj):
447 key = super(EpollSelector, self).unregister(fileobj)
445 key = super(EpollSelector, self).unregister(fileobj)
448 try:
446 try:
449 _syscall_wrapper(self._epoll.unregister, False, key.fd)
447 _syscall_wrapper(self._epoll.unregister, False, key.fd)
450 except _ERROR_TYPES:
448 except _ERROR_TYPES:
451 # This can occur when the fd was closed since registration.
449 # This can occur when the fd was closed since registration.
452 pass
450 pass
453 return key
451 return key
454
452
455 def select(self, timeout=None):
453 def select(self, timeout=None):
456 if timeout is not None:
454 if timeout is not None:
457 if timeout <= 0:
455 if timeout <= 0:
458 timeout = 0.0
456 timeout = 0.0
459 else:
457 else:
460 # select.epoll.poll() has a resolution of 1 millisecond
458 # select.epoll.poll() has a resolution of 1 millisecond
461 # but luckily takes seconds so we don't need a wrapper
459 # but luckily takes seconds so we don't need a wrapper
462 # like PollSelector. Just for better rounding.
460 # like PollSelector. Just for better rounding.
463 timeout = math.ceil(timeout * 1000) * 0.001
461 timeout = math.ceil(timeout * 1000) * 0.001
464 timeout = float(timeout)
462 timeout = float(timeout)
465 else:
463 else:
466 timeout = -1.0 # epoll.poll() must have a float.
464 timeout = -1.0 # epoll.poll() must have a float.
467
465
468 # We always want at least 1 to ensure that select can be called
466 # We always want at least 1 to ensure that select can be called
469 # with no file descriptors registered. Otherwise will fail.
467 # with no file descriptors registered. Otherwise will fail.
470 max_events = max(len(self._fd_to_key), 1)
468 max_events = max(len(self._fd_to_key), 1)
471
469
472 ready = []
470 ready = []
473 fd_events = _syscall_wrapper(self._epoll.poll, True,
471 fd_events = _syscall_wrapper(self._epoll.poll, True,
474 timeout=timeout,
472 timeout=timeout,
475 maxevents=max_events)
473 maxevents=max_events)
476 for fd, event_mask in fd_events:
474 for fd, event_mask in fd_events:
477 events = 0
475 events = 0
478 if event_mask & ~select.EPOLLIN:
476 if event_mask & ~select.EPOLLIN:
479 events |= EVENT_WRITE
477 events |= EVENT_WRITE
480 if event_mask & ~select.EPOLLOUT:
478 if event_mask & ~select.EPOLLOUT:
481 events |= EVENT_READ
479 events |= EVENT_READ
482
480
483 key = self._key_from_fd(fd)
481 key = self._key_from_fd(fd)
484 if key:
482 if key:
485 ready.append((key, events & key.events))
483 ready.append((key, events & key.events))
486 return ready
484 return ready
487
485
488 def close(self):
486 def close(self):
489 self._epoll.close()
487 self._epoll.close()
490 super(EpollSelector, self).close()
488 super(EpollSelector, self).close()
491
489
492 __all__.append('EpollSelector')
490 __all__.append('EpollSelector')
493
491
494
492
495 if hasattr(select, "devpoll"):
493 if hasattr(select, "devpoll"):
496 class DevpollSelector(BaseSelector):
494 class DevpollSelector(BaseSelector):
497 """Solaris /dev/poll selector."""
495 """Solaris /dev/poll selector."""
498
496
499 def __init__(self):
497 def __init__(self):
500 super(DevpollSelector, self).__init__()
498 super(DevpollSelector, self).__init__()
501 self._devpoll = select.devpoll()
499 self._devpoll = select.devpoll()
502
500
503 def fileno(self):
501 def fileno(self):
504 return self._devpoll.fileno()
502 return self._devpoll.fileno()
505
503
506 def register(self, fileobj, events, data=None):
504 def register(self, fileobj, events, data=None):
507 key = super(DevpollSelector, self).register(fileobj, events, data)
505 key = super(DevpollSelector, self).register(fileobj, events, data)
508 poll_events = 0
506 poll_events = 0
509 if events & EVENT_READ:
507 if events & EVENT_READ:
510 poll_events |= select.POLLIN
508 poll_events |= select.POLLIN
511 if events & EVENT_WRITE:
509 if events & EVENT_WRITE:
512 poll_events |= select.POLLOUT
510 poll_events |= select.POLLOUT
513 self._devpoll.register(key.fd, poll_events)
511 self._devpoll.register(key.fd, poll_events)
514 return key
512 return key
515
513
516 def unregister(self, fileobj):
514 def unregister(self, fileobj):
517 key = super(DevpollSelector, self).unregister(fileobj)
515 key = super(DevpollSelector, self).unregister(fileobj)
518 self._devpoll.unregister(key.fd)
516 self._devpoll.unregister(key.fd)
519 return key
517 return key
520
518
521 def _wrap_poll(self, timeout=None):
519 def _wrap_poll(self, timeout=None):
522 """ Wrapper function for select.poll.poll() so that
520 """ Wrapper function for select.poll.poll() so that
523 _syscall_wrapper can work with only seconds. """
521 _syscall_wrapper can work with only seconds. """
524 if timeout is not None:
522 if timeout is not None:
525 if timeout <= 0:
523 if timeout <= 0:
526 timeout = 0
524 timeout = 0
527 else:
525 else:
528 # select.devpoll.poll() has a resolution of 1 millisecond,
526 # select.devpoll.poll() has a resolution of 1 millisecond,
529 # round away from zero to wait *at least* timeout seconds.
527 # round away from zero to wait *at least* timeout seconds.
530 timeout = math.ceil(timeout * 1000)
528 timeout = math.ceil(timeout * 1000)
531
529
532 result = self._devpoll.poll(timeout)
530 result = self._devpoll.poll(timeout)
533 return result
531 return result
534
532
535 def select(self, timeout=None):
533 def select(self, timeout=None):
536 ready = []
534 ready = []
537 fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
535 fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
538 for fd, event_mask in fd_events:
536 for fd, event_mask in fd_events:
539 events = 0
537 events = 0
540 if event_mask & ~select.POLLIN:
538 if event_mask & ~select.POLLIN:
541 events |= EVENT_WRITE
539 events |= EVENT_WRITE
542 if event_mask & ~select.POLLOUT:
540 if event_mask & ~select.POLLOUT:
543 events |= EVENT_READ
541 events |= EVENT_READ
544
542
545 key = self._key_from_fd(fd)
543 key = self._key_from_fd(fd)
546 if key:
544 if key:
547 ready.append((key, events & key.events))
545 ready.append((key, events & key.events))
548
546
549 return ready
547 return ready
550
548
551 def close(self):
549 def close(self):
552 self._devpoll.close()
550 self._devpoll.close()
553 super(DevpollSelector, self).close()
551 super(DevpollSelector, self).close()
554
552
555 __all__.append('DevpollSelector')
553 __all__.append('DevpollSelector')
556
554
557
555
558 if hasattr(select, "kqueue"):
556 if hasattr(select, "kqueue"):
559 class KqueueSelector(BaseSelector):
557 class KqueueSelector(BaseSelector):
560 """ Kqueue / Kevent-based selector """
558 """ Kqueue / Kevent-based selector """
561 def __init__(self):
559 def __init__(self):
562 super(KqueueSelector, self).__init__()
560 super(KqueueSelector, self).__init__()
563 self._kqueue = select.kqueue()
561 self._kqueue = select.kqueue()
564
562
565 def fileno(self):
563 def fileno(self):
566 return self._kqueue.fileno()
564 return self._kqueue.fileno()
567
565
568 def register(self, fileobj, events, data=None):
566 def register(self, fileobj, events, data=None):
569 key = super(KqueueSelector, self).register(fileobj, events, data)
567 key = super(KqueueSelector, self).register(fileobj, events, data)
570 if events & EVENT_READ:
568 if events & EVENT_READ:
571 kevent = select.kevent(key.fd,
569 kevent = select.kevent(key.fd,
572 select.KQ_FILTER_READ,
570 select.KQ_FILTER_READ,
573 select.KQ_EV_ADD)
571 select.KQ_EV_ADD)
574
572
575 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
573 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
576
574
577 if events & EVENT_WRITE:
575 if events & EVENT_WRITE:
578 kevent = select.kevent(key.fd,
576 kevent = select.kevent(key.fd,
579 select.KQ_FILTER_WRITE,
577 select.KQ_FILTER_WRITE,
580 select.KQ_EV_ADD)
578 select.KQ_EV_ADD)
581
579
582 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
580 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
583
581
584 return key
582 return key
585
583
586 def unregister(self, fileobj):
584 def unregister(self, fileobj):
587 key = super(KqueueSelector, self).unregister(fileobj)
585 key = super(KqueueSelector, self).unregister(fileobj)
588 if key.events & EVENT_READ:
586 if key.events & EVENT_READ:
589 kevent = select.kevent(key.fd,
587 kevent = select.kevent(key.fd,
590 select.KQ_FILTER_READ,
588 select.KQ_FILTER_READ,
591 select.KQ_EV_DELETE)
589 select.KQ_EV_DELETE)
592 try:
590 try:
593 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
591 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
594 except _ERROR_TYPES:
592 except _ERROR_TYPES:
595 pass
593 pass
596 if key.events & EVENT_WRITE:
594 if key.events & EVENT_WRITE:
597 kevent = select.kevent(key.fd,
595 kevent = select.kevent(key.fd,
598 select.KQ_FILTER_WRITE,
596 select.KQ_FILTER_WRITE,
599 select.KQ_EV_DELETE)
597 select.KQ_EV_DELETE)
600 try:
598 try:
601 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
599 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
602 except _ERROR_TYPES:
600 except _ERROR_TYPES:
603 pass
601 pass
604
602
605 return key
603 return key
606
604
607 def select(self, timeout=None):
605 def select(self, timeout=None):
608 if timeout is not None:
606 if timeout is not None:
609 timeout = max(timeout, 0)
607 timeout = max(timeout, 0)
610
608
611 max_events = len(self._fd_to_key) * 2
609 max_events = len(self._fd_to_key) * 2
612 ready_fds = {}
610 ready_fds = {}
613
611
614 kevent_list = _syscall_wrapper(self._kqueue.control, True,
612 kevent_list = _syscall_wrapper(self._kqueue.control, True,
615 None, max_events, timeout)
613 None, max_events, timeout)
616
614
617 for kevent in kevent_list:
615 for kevent in kevent_list:
618 fd = kevent.ident
616 fd = kevent.ident
619 event_mask = kevent.filter
617 event_mask = kevent.filter
620 events = 0
618 events = 0
621 if event_mask == select.KQ_FILTER_READ:
619 if event_mask == select.KQ_FILTER_READ:
622 events |= EVENT_READ
620 events |= EVENT_READ
623 if event_mask == select.KQ_FILTER_WRITE:
621 if event_mask == select.KQ_FILTER_WRITE:
624 events |= EVENT_WRITE
622 events |= EVENT_WRITE
625
623
626 key = self._key_from_fd(fd)
624 key = self._key_from_fd(fd)
627 if key:
625 if key:
628 if key.fd not in ready_fds:
626 if key.fd not in ready_fds:
629 ready_fds[key.fd] = (key, events & key.events)
627 ready_fds[key.fd] = (key, events & key.events)
630 else:
628 else:
631 old_events = ready_fds[key.fd][1]
629 old_events = ready_fds[key.fd][1]
632 ready_fds[key.fd] = (key, (events | old_events) & key.events)
630 ready_fds[key.fd] = (key, (events | old_events) & key.events)
633
631
634 return list(ready_fds.values())
632 return list(ready_fds.values())
635
633
636 def close(self):
634 def close(self):
637 self._kqueue.close()
635 self._kqueue.close()
638 super(KqueueSelector, self).close()
636 super(KqueueSelector, self).close()
639
637
640 __all__.append('KqueueSelector')
638 __all__.append('KqueueSelector')
641
639
642
640
643 def _can_allocate(struct):
641 def _can_allocate(struct):
644 """ Checks that select structs can be allocated by the underlying
642 """ Checks that select structs can be allocated by the underlying
645 operating system, not just advertised by the select module. We don't
643 operating system, not just advertised by the select module. We don't
646 check select() because we'll be hopeful that most platforms that
644 check select() because we'll be hopeful that most platforms that
647 don't have it available will not advertise it (e.g. GAE). """
645 don't have it available will not advertise it (e.g. GAE). """
648 try:
646 try:
649 # select.poll() objects won't fail until used.
647 # select.poll() objects won't fail until used.
650 if struct == 'poll':
648 if struct == 'poll':
651 p = select.poll()
649 p = select.poll()
652 p.poll(0)
650 p.poll(0)
653
651
654 # All others will fail on allocation.
652 # All others will fail on allocation.
655 else:
653 else:
656 getattr(select, struct)().close()
654 getattr(select, struct)().close()
657 return True
655 return True
658 except (OSError, AttributeError):
656 except (OSError, AttributeError):
659 return False
657 return False
660
658
661
659
662 # Python 3.5 uses a more direct route to wrap system calls to increase speed.
660 # Python 3.5 uses a more direct route to wrap system calls to increase speed.
663 if sys.version_info >= (3, 5):
661 if sys.version_info >= (3, 5):
664 def _syscall_wrapper(func, _, *args, **kwargs):
662 def _syscall_wrapper(func, _, *args, **kwargs):
665 """ This is the short-circuit version of the below logic
663 """ This is the short-circuit version of the below logic
666 because in Python 3.5+ all selectors restart system calls. """
664 because in Python 3.5+ all selectors restart system calls. """
667 return func(*args, **kwargs)
665 return func(*args, **kwargs)
668 else:
666 else:
669 def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
667 def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
670 """ Wrapper function for syscalls that could fail due to EINTR.
668 """ Wrapper function for syscalls that could fail due to EINTR.
671 All functions should be retried if there is time left in the timeout
669 All functions should be retried if there is time left in the timeout
672 in accordance with PEP 475. """
670 in accordance with PEP 475. """
673 timeout = kwargs.get("timeout", None)
671 timeout = kwargs.get("timeout", None)
674 if timeout is None:
672 if timeout is None:
675 expires = None
673 expires = None
676 recalc_timeout = False
674 recalc_timeout = False
677 else:
675 else:
678 timeout = float(timeout)
676 timeout = float(timeout)
679 if timeout < 0.0: # Timeout less than 0 treated as no timeout.
677 if timeout < 0.0: # Timeout less than 0 treated as no timeout.
680 expires = None
678 expires = None
681 else:
679 else:
682 expires = monotonic() + timeout
680 expires = monotonic() + timeout
683
681
684 args = list(args)
682 args = list(args)
685 if recalc_timeout and "timeout" not in kwargs:
683 if recalc_timeout and "timeout" not in kwargs:
686 raise ValueError(
684 raise ValueError(
687 "Timeout must be in args or kwargs to be recalculated")
685 "Timeout must be in args or kwargs to be recalculated")
688
686
689 result = _SYSCALL_SENTINEL
687 result = _SYSCALL_SENTINEL
690 while result is _SYSCALL_SENTINEL:
688 while result is _SYSCALL_SENTINEL:
691 try:
689 try:
692 result = func(*args, **kwargs)
690 result = func(*args, **kwargs)
693 # OSError is thrown by select.select
691 # OSError is thrown by select.select
694 # IOError is thrown by select.epoll.poll
692 # IOError is thrown by select.epoll.poll
695 # select.error is thrown by select.poll.poll
693 # select.error is thrown by select.poll.poll
696 # Aren't we thankful for Python 3.x rework for exceptions?
694 # Aren't we thankful for Python 3.x rework for exceptions?
697 except (OSError, IOError, select.error) as e:
695 except (OSError, IOError, select.error) as e:
698 # select.error wasn't a subclass of OSError in the past.
696 # select.error wasn't a subclass of OSError in the past.
699 errcode = None
697 errcode = None
700 if hasattr(e, "errno"):
698 if hasattr(e, "errno"):
701 errcode = e.errno
699 errcode = e.errno
702 elif hasattr(e, "args"):
700 elif hasattr(e, "args"):
703 errcode = e.args[0]
701 errcode = e.args[0]
704
702
705 # Also test for the Windows equivalent of EINTR.
703 # Also test for the Windows equivalent of EINTR.
706 is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
704 is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
707 errcode == errno.WSAEINTR))
705 errcode == errno.WSAEINTR))
708
706
709 if is_interrupt:
707 if is_interrupt:
710 if expires is not None:
708 if expires is not None:
711 current_time = monotonic()
709 current_time = monotonic()
712 if current_time > expires:
710 if current_time > expires:
713 raise OSError(errno.ETIMEDOUT, 'Connection timed out')
711 raise OSError(errno.ETIMEDOUT, 'Connection timed out')
714 if recalc_timeout:
712 if recalc_timeout:
715 if "timeout" in kwargs:
713 if "timeout" in kwargs:
716 kwargs["timeout"] = expires - current_time
714 kwargs["timeout"] = expires - current_time
717 continue
715 continue
718 raise
716 raise
719 return result
717 return result
720
718
721
719
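A tiny sketch of the calling convention used with _syscall_wrapper() throughout this module: the first argument is the callable, the second says whether the keyword timeout must be recomputed after an EINTR, and the rest is forwarded unchanged. The fake poll function is hypothetical, the import path assumes the module's new thirdparty location, and on Python 3.5+ the wrapper is simply a pass-through.

from mercurial.thirdparty.selectors2 import _syscall_wrapper

def _fake_poll(timeout=None):
    # Stand-in for select.poll().poll(); pretends no fds became ready.
    return []

# recalc_timeout=True requires 'timeout' to be passed as a keyword so the
# wrapper can shrink it when retrying after EINTR (Python < 3.5 only).
assert _syscall_wrapper(_fake_poll, True, timeout=0.5) == []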
722 # Choose the best implementation, roughly:
720 # Choose the best implementation, roughly:
723 # kqueue == devpoll == epoll > poll > select
721 # kqueue == devpoll == epoll > poll > select
724 # select() also can't accept a FD > FD_SETSIZE (usually around 1024)
722 # select() also can't accept a FD > FD_SETSIZE (usually around 1024)
725 def DefaultSelector():
723 def DefaultSelector():
726 """ This function serves as a first call for DefaultSelector to
724 """ This function serves as a first call for DefaultSelector to
727 detect if the select module is being monkey-patched incorrectly
725 detect if the select module is being monkey-patched incorrectly
728 by eventlet or greenlet, and to preserve proper behavior. """
726 by eventlet or greenlet, and to preserve proper behavior. """
729 global _DEFAULT_SELECTOR
727 global _DEFAULT_SELECTOR
730 if _DEFAULT_SELECTOR is None:
728 if _DEFAULT_SELECTOR is None:
731 if pycompat.isjython:
729 if pycompat.isjython:
732 _DEFAULT_SELECTOR = JythonSelectSelector
730 _DEFAULT_SELECTOR = JythonSelectSelector
733 elif _can_allocate('kqueue'):
731 elif _can_allocate('kqueue'):
734 _DEFAULT_SELECTOR = KqueueSelector
732 _DEFAULT_SELECTOR = KqueueSelector
735 elif _can_allocate('devpoll'):
733 elif _can_allocate('devpoll'):
736 _DEFAULT_SELECTOR = DevpollSelector
734 _DEFAULT_SELECTOR = DevpollSelector
737 elif _can_allocate('epoll'):
735 elif _can_allocate('epoll'):
738 _DEFAULT_SELECTOR = EpollSelector
736 _DEFAULT_SELECTOR = EpollSelector
739 elif _can_allocate('poll'):
737 elif _can_allocate('poll'):
740 _DEFAULT_SELECTOR = PollSelector
738 _DEFAULT_SELECTOR = PollSelector
741 elif hasattr(select, 'select'):
739 elif hasattr(select, 'select'):
742 _DEFAULT_SELECTOR = SelectSelector
740 _DEFAULT_SELECTOR = SelectSelector
743 else: # Platform-specific: AppEngine
741 else: # Platform-specific: AppEngine
744 raise RuntimeError('Platform does not have a selector.')
742 raise RuntimeError('Platform does not have a selector.')
745 return _DEFAULT_SELECTOR()
743 return _DEFAULT_SELECTOR()
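To round off the file, a short sketch of how a caller could confirm which backend DefaultSelector() resolved to on the current platform. The class names printed are examples; the preference order is the one documented in the comment above, and the chosen class is cached in _DEFAULT_SELECTOR.

from mercurial.thirdparty.selectors2 import DefaultSelector

sel = DefaultSelector()
print(type(sel).__name__)    # e.g. 'EpollSelector' on Linux, 'KqueueSelector'
                             # on macOS/BSD, 'SelectSelector' as the fallback
other = DefaultSelector()
assert type(other) is type(sel)   # same cached class, fresh instance each call
other.close()
sel.close()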
@@ -1,56 +1,55 b''
1 #require test-repo
1 #require test-repo
2
2
3 $ . "$TESTDIR/helpers-testrepo.sh"
3 $ . "$TESTDIR/helpers-testrepo.sh"
4 $ check_code="$TESTDIR"/../contrib/check-code.py
4 $ check_code="$TESTDIR"/../contrib/check-code.py
5 $ cd "$TESTDIR"/..
5 $ cd "$TESTDIR"/..
6
6
7 New errors are not allowed. Warnings are strongly discouraged.
7 New errors are not allowed. Warnings are strongly discouraged.
8 (The string "no-che?k-code" is written with a "?" so this file itself is not skipped by the check.)
8 (The string "no-che?k-code" is written with a "?" so this file itself is not skipped by the check.)
9
9
10 $ testrepohg locate \
10 $ testrepohg locate \
11 > -X contrib/python-zstandard \
11 > -X contrib/python-zstandard \
12 > -X hgext/fsmonitor/pywatchman \
12 > -X hgext/fsmonitor/pywatchman \
13 > -X mercurial/thirdparty \
13 > -X mercurial/thirdparty \
14 > | sed 's-\\-/-g' | "$check_code" --warnings --per-file=0 - || false
14 > | sed 's-\\-/-g' | "$check_code" --warnings --per-file=0 - || false
15 Skipping i18n/polib.py it has no-che?k-code (glob)
15 Skipping i18n/polib.py it has no-che?k-code (glob)
16 Skipping mercurial/httpclient/__init__.py it has no-che?k-code (glob)
16 Skipping mercurial/httpclient/__init__.py it has no-che?k-code (glob)
17 Skipping mercurial/httpclient/_readers.py it has no-che?k-code (glob)
17 Skipping mercurial/httpclient/_readers.py it has no-che?k-code (glob)
18 Skipping mercurial/selectors2.py it has no-che?k-code (glob)
19 Skipping mercurial/statprof.py it has no-che?k-code (glob)
18 Skipping mercurial/statprof.py it has no-che?k-code (glob)
20 Skipping tests/badserverext.py it has no-che?k-code (glob)
19 Skipping tests/badserverext.py it has no-che?k-code (glob)
21
20
22 @commands in debugcommands.py should be in alphabetical order.
21 @commands in debugcommands.py should be in alphabetical order.
23
22
24 >>> import re
23 >>> import re
25 >>> commands = []
24 >>> commands = []
26 >>> with open('mercurial/debugcommands.py', 'rb') as fh:
25 >>> with open('mercurial/debugcommands.py', 'rb') as fh:
27 ... for line in fh:
26 ... for line in fh:
28 ... m = re.match("^@command\('([a-z]+)", line)
27 ... m = re.match("^@command\('([a-z]+)", line)
29 ... if m:
28 ... if m:
30 ... commands.append(m.group(1))
29 ... commands.append(m.group(1))
31 >>> scommands = list(sorted(commands))
30 >>> scommands = list(sorted(commands))
32 >>> for i, command in enumerate(scommands):
31 >>> for i, command in enumerate(scommands):
33 ... if command != commands[i]:
32 ... if command != commands[i]:
34 ... print('commands in debugcommands.py not sorted; first differing '
33 ... print('commands in debugcommands.py not sorted; first differing '
35 ... 'command is %s; expected %s' % (commands[i], command))
34 ... 'command is %s; expected %s' % (commands[i], command))
36 ... break
35 ... break
37
36
38 Prevent adding new files in the root directory accidentally.
37 Prevent adding new files in the root directory accidentally.
39
38
40 $ testrepohg files 'glob:*'
39 $ testrepohg files 'glob:*'
41 .arcconfig
40 .arcconfig
42 .clang-format
41 .clang-format
43 .editorconfig
42 .editorconfig
44 .hgignore
43 .hgignore
45 .hgsigs
44 .hgsigs
46 .hgtags
45 .hgtags
47 .jshintrc
46 .jshintrc
48 CONTRIBUTING
47 CONTRIBUTING
49 CONTRIBUTORS
48 CONTRIBUTORS
50 COPYING
49 COPYING
51 Makefile
50 Makefile
52 README.rst
51 README.rst
53 hg
52 hg
54 hgeditor
53 hgeditor
55 hgweb.cgi
54 hgweb.cgi
56 setup.py
55 setup.py