thirdparty: move selectors2 module to where it should be
Yuya Nishihara
r35245:414114a7 default
@@ -1,299 +1,296 @@
1 1 # __init__.py - Startup and module loading logic for Mercurial.
2 2 #
3 3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import sys
11 11
12 12 # Allow 'from mercurial import demandimport' to keep working.
13 13 import hgdemandimport
14 14 demandimport = hgdemandimport
15 15
16 16 __all__ = []
17 17
18 18 # Python 3 uses a custom module loader that transforms source code between
19 19 # source file reading and compilation. This is done by registering a custom
20 20 # finder that changes the spec for Mercurial modules to use a custom loader.
21 21 if sys.version_info[0] >= 3:
22 22 import importlib
23 23 import importlib.abc
24 24 import io
25 25 import token
26 26 import tokenize
27 27
28 28 class hgpathentryfinder(importlib.abc.MetaPathFinder):
29 29 """A sys.meta_path finder that uses a custom module loader."""
30 30 def find_spec(self, fullname, path, target=None):
31 31 # Only handle Mercurial-related modules.
32 32 if not fullname.startswith(('mercurial.', 'hgext.', 'hgext3rd.')):
33 33 return None
34 # selectors2 is already dual-version clean, don't try and mangle it
35 if fullname.startswith('mercurial.selectors2'):
36 return None
37 34 # third-party packages are expected to be dual-version clean
38 35 if fullname.startswith('mercurial.thirdparty'):
39 36 return None
40 37 # zstd is already dual-version clean, don't try and mangle it
41 38 if fullname.startswith('mercurial.zstd'):
42 39 return None
43 40 # pywatchman is already dual-version clean, don't try and mangle it
44 41 if fullname.startswith('hgext.fsmonitor.pywatchman'):
45 42 return None
46 43
47 44 # Try to find the module using other registered finders.
48 45 spec = None
49 46 for finder in sys.meta_path:
50 47 if finder == self:
51 48 continue
52 49
53 50 spec = finder.find_spec(fullname, path, target=target)
54 51 if spec:
55 52 break
56 53
57 54 # This is a Mercurial-related module but we couldn't find it
58 55 # using the previously-registered finders. This likely means
59 56 # the module doesn't exist.
60 57 if not spec:
61 58 return None
62 59
63 60 # TODO need to support loaders from alternate specs, like zip
64 61 # loaders.
65 62 loader = hgloader(spec.name, spec.origin)
66 63 # Can't use util.safehasattr here because that would require
67 64 # importing util, and we're in import code.
68 65 if hasattr(spec.loader, 'loader'): # hasattr-py3-only
69 66 # This is a nested loader (maybe a lazy loader?)
70 67 spec.loader.loader = loader
71 68 else:
72 69 spec.loader = loader
73 70 return spec
74 71
75 72 def replacetokens(tokens, fullname):
76 73 """Transform a stream of tokens from raw to Python 3.
77 74
78 75 It is called by the custom module loading machinery to rewrite
79 76 source/tokens between source decoding and compilation.
80 77
81 78 Returns a generator of possibly rewritten tokens.
82 79
83 80 The input token list may be mutated as part of processing. However,
84 81 its changes do not necessarily match the output token stream.
85 82
86 83 REMEMBER TO CHANGE ``BYTECODEHEADER`` WHEN CHANGING THIS FUNCTION
87 84 OR CACHED FILES WON'T GET INVALIDATED PROPERLY.
88 85 """
89 86 futureimpline = False
90 87
91 88 # The following utility functions access the tokens list and i index of
92 89 # the for i, t in enumerate(tokens) loop below
93 90 def _isop(j, *o):
94 91 """Assert that tokens[j] is an OP with one of the given values"""
95 92 try:
96 93 return tokens[j].type == token.OP and tokens[j].string in o
97 94 except IndexError:
98 95 return False
99 96
100 97 def _findargnofcall(n):
101 98 """Find arg n of a call expression (start at 0)
102 99
103 100 Returns index of the first token of that argument, or None if
104 101 there is not that many arguments.
105 102
106 103 Assumes that token[i + 1] is '('.
107 104
108 105 """
109 106 nested = 0
110 107 for j in range(i + 2, len(tokens)):
111 108 if _isop(j, ')', ']', '}'):
112 109 # end of call, tuple, subscription or dict / set
113 110 nested -= 1
114 111 if nested < 0:
115 112 return None
116 113 elif n == 0:
117 114 # this is the starting position of arg
118 115 return j
119 116 elif _isop(j, '(', '[', '{'):
120 117 nested += 1
121 118 elif _isop(j, ',') and nested == 0:
122 119 n -= 1
123 120
124 121 return None
125 122
126 123 def _ensureunicode(j):
127 124 """Make sure the token at j is a unicode string
128 125
129 126 This rewrites a string token to include the unicode literal prefix
130 127 so the string transformer won't add the byte prefix.
131 128
132 129 Ignores tokens that are not strings. Assumes bounds checking has
133 130 already been done.
134 131
135 132 """
136 133 st = tokens[j]
137 134 if st.type == token.STRING and st.string.startswith(("'", '"')):
138 135 tokens[j] = st._replace(string='u%s' % st.string)
139 136
140 137 for i, t in enumerate(tokens):
141 138 # Convert most string literals to byte literals. String literals
142 139 # in Python 2 are bytes. String literals in Python 3 are unicode.
143 140 # Most strings in Mercurial are bytes and unicode strings are rare.
144 141 # Rather than rewrite all string literals to use ``b''`` to indicate
145 142 # byte strings, we apply this token transformer to insert the ``b``
146 143 # prefix nearly everywhere.
147 144 if t.type == token.STRING:
148 145 s = t.string
149 146
150 147 # Preserve docstrings as string literals. This is inconsistent
151 148 # with regular unprefixed strings. However, the
152 149 # "from __future__" parsing (which allows a module docstring to
153 150 # exist before it) doesn't properly handle the docstring if it
154 151 # is b''' prefixed, leading to a SyntaxError. We leave all
155 152 # docstrings as unprefixed to avoid this. This means Mercurial
156 153 # components touching docstrings need to handle unicode,
157 154 # unfortunately.
158 155 if s[0:3] in ("'''", '"""'):
159 156 yield t
160 157 continue
161 158
162 159 # If the first character isn't a quote, it is likely a string
163 160 # prefixing character (such as 'b', 'u', or 'r'). Ignore.
164 161 if s[0] not in ("'", '"'):
165 162 yield t
166 163 continue
167 164
168 165 # String literal. Prefix to make a b'' string.
169 166 yield t._replace(string='b%s' % t.string)
170 167 continue
171 168
172 169 # Insert compatibility imports at "from __future__ import" line.
173 170 # No '\n' should be added to preserve line numbers.
174 171 if (t.type == token.NAME and t.string == 'import' and
175 172 all(u.type == token.NAME for u in tokens[i - 2:i]) and
176 173 [u.string for u in tokens[i - 2:i]] == ['from', '__future__']):
177 174 futureimpline = True
178 175 if t.type == token.NEWLINE and futureimpline:
179 176 futureimpline = False
180 177 if fullname == 'mercurial.pycompat':
181 178 yield t
182 179 continue
183 180 r, c = t.start
184 181 l = (b'; from mercurial.pycompat import '
185 182 b'delattr, getattr, hasattr, setattr, xrange, '
186 183 b'open, unicode\n')
187 184 for u in tokenize.tokenize(io.BytesIO(l).readline):
188 185 if u.type in (tokenize.ENCODING, token.ENDMARKER):
189 186 continue
190 187 yield u._replace(
191 188 start=(r, c + u.start[1]), end=(r, c + u.end[1]))
192 189 continue
193 190
194 191 # This looks like a function call.
195 192 if t.type == token.NAME and _isop(i + 1, '('):
196 193 fn = t.string
197 194
198 195 # *attr() builtins don't accept byte strings to 2nd argument.
199 196 if (fn in ('getattr', 'setattr', 'hasattr', 'safehasattr') and
200 197 not _isop(i - 1, '.')):
201 198 arg1idx = _findargnofcall(1)
202 199 if arg1idx is not None:
203 200 _ensureunicode(arg1idx)
204 201
205 202 # .encode() and .decode() on str/bytes/unicode don't accept
206 203 # byte strings on Python 3.
207 204 elif fn in ('encode', 'decode') and _isop(i - 1, '.'):
208 205 for argn in range(2):
209 206 argidx = _findargnofcall(argn)
210 207 if argidx is not None:
211 208 _ensureunicode(argidx)
212 209
213 210 # It changes iteritems/values to items/values as they are not
214 211 # present in Python 3 world.
215 212 elif fn in ('iteritems', 'itervalues'):
216 213 yield t._replace(string=fn[4:])
217 214 continue
218 215
219 216 # Emit unmodified token.
220 217 yield t
221 218
222 219 # Header to add to bytecode files. This MUST be changed when
223 220 # ``replacetokens`` or any mechanism that changes semantics of module
224 221 # loading is changed. Otherwise cached bytecode may get loaded without
225 222 # the new transformation mechanisms applied.
226 223 BYTECODEHEADER = b'HG\x00\x0a'
227 224
228 225 class hgloader(importlib.machinery.SourceFileLoader):
229 226 """Custom module loader that transforms source code.
230 227
231 228 When the source code is converted to a code object, we transform
232 229 certain patterns to be Python 3 compatible. This allows us to write code
233 230 that is natively Python 2 and compatible with Python 3 without
234 231 making the code excessively ugly.
235 232
236 233 We do this by transforming the token stream between parse and compile.
237 234
238 235 Implementing transformations invalidates caching assumptions made
239 236 by the built-in importer. The built-in importer stores a header on
240 237 saved bytecode files indicating the Python/bytecode version. If the
241 238 version changes, the cached bytecode is ignored. The Mercurial
242 239 transformations could change at any time. This means we need to check
243 240 that cached bytecode was generated with the current transformation
244 241 code or there could be a mismatch between cached bytecode and what
245 242 would be generated from this class.
246 243
247 244 We supplement the bytecode caching layer by wrapping ``get_data``
248 245 and ``set_data``. These functions are called when the
249 246 ``SourceFileLoader`` retrieves and saves bytecode cache files,
250 247 respectively. We simply add an additional header on the file. As
251 248 long as the version in this file is changed when semantics change,
252 249 cached bytecode should be invalidated when transformations change.
253 250
254 251 The added header has the form ``HG<VERSION>``. That is a literal
255 252 ``HG`` with 2 binary bytes indicating the transformation version.
256 253 """
257 254 def get_data(self, path):
258 255 data = super(hgloader, self).get_data(path)
259 256
260 257 if not path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
261 258 return data
262 259
263 260 # There should be a header indicating the Mercurial transformation
264 261 # version. If it doesn't exist or doesn't match the current version,
265 262 # we raise an OSError because that is what
266 263 # ``SourceFileLoader.get_code()`` expects when loading bytecode
267 264 # paths to indicate the cached file is "bad."
268 265 if data[0:2] != b'HG':
269 266 raise OSError('no hg header')
270 267 if data[0:4] != BYTECODEHEADER:
271 268 raise OSError('hg header version mismatch')
272 269
273 270 return data[4:]
274 271
275 272 def set_data(self, path, data, *args, **kwargs):
276 273 if path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
277 274 data = BYTECODEHEADER + data
278 275
279 276 return super(hgloader, self).set_data(path, data, *args, **kwargs)
280 277
281 278 def source_to_code(self, data, path):
282 279 """Perform token transformation before compilation."""
283 280 buf = io.BytesIO(data)
284 281 tokens = tokenize.tokenize(buf.readline)
285 282 data = tokenize.untokenize(replacetokens(list(tokens), self.name))
286 283 # Python's built-in importer strips frames from exceptions raised
287 284 # for this code. Unfortunately, that mechanism isn't extensible
288 285 # and our frame will be blamed for the import failure. There
289 286 # are extremely hacky ways to do frame stripping. We haven't
290 287 # implemented them because they are very ugly.
291 288 return super(hgloader, self).source_to_code(data, path)
292 289
293 290 # We automagically register our custom importer as a side-effect of
294 291 # loading. This is necessary to ensure that any entry points are able
295 292 # to import mercurial.* modules without having to perform this
296 293 # registration themselves.
297 294 if not any(isinstance(x, hgpathentryfinder) for x in sys.meta_path):
298 295 # meta_path is used before any implicit finders and before sys.path.
299 296 sys.meta_path.insert(0, hgpathentryfinder())
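
For context: the finder/loader registered above rewrites Mercurial's Python 2 style sources on the fly when running under Python 3. Below is a minimal, self-contained sketch (not Mercurial code) of the core idea, using only the stdlib tokenize module; the byteify() helper name is made up for illustration.

import io
import token
import tokenize

def byteify(source):
    # Prefix unprefixed, non-docstring string literals with b'',
    # roughly as replacetokens() above does for Mercurial modules.
    out = []
    for t in tokenize.tokenize(io.BytesIO(source).readline):
        if (t.type == token.STRING
                and t.string[0] in ("'", '"')                  # no 'b'/'u'/'r' prefix
                and not t.string.startswith(("'''", '"""'))):  # keep docstrings as-is
            t = t._replace(string='b%s' % t.string)
        out.append(t)
    return tokenize.untokenize(out)

print(byteify(b"x = 'abc'").decode('utf-8'))  # -> x = b'abc'

The real loader goes further: it also rewrites iteritems()/itervalues() calls, injects pycompat imports on the "from __future__ import" line, and stamps the resulting bytecode with BYTECODEHEADER so stale caches are invalidated when the transformation changes.
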
@@ -1,549 +1,549 @@
1 1 # commandserver.py - communicate with Mercurial's API over a pipe
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import gc
12 12 import os
13 13 import random
14 14 import signal
15 15 import socket
16 16 import struct
17 17 import traceback
18 18
19 19 from .i18n import _
20 from .thirdparty import selectors2
20 21 from . import (
21 22 encoding,
22 23 error,
23 24 pycompat,
24 selectors2,
25 25 util,
26 26 )
27 27
28 28 logfile = None
29 29
30 30 def log(*args):
31 31 if not logfile:
32 32 return
33 33
34 34 for a in args:
35 35 logfile.write(str(a))
36 36
37 37 logfile.flush()
38 38
39 39 class channeledoutput(object):
40 40 """
41 41 Write data to out in the following format:
42 42
43 43 data length (unsigned int),
44 44 data
45 45 """
46 46 def __init__(self, out, channel):
47 47 self.out = out
48 48 self.channel = channel
49 49
50 50 @property
51 51 def name(self):
52 52 return '<%c-channel>' % self.channel
53 53
54 54 def write(self, data):
55 55 if not data:
56 56 return
57 57 # single write() to guarantee the same atomicity as the underlying file
58 58 self.out.write(struct.pack('>cI', self.channel, len(data)) + data)
59 59 self.out.flush()
60 60
61 61 def __getattr__(self, attr):
62 62 if attr in ('isatty', 'fileno', 'tell', 'seek'):
63 63 raise AttributeError(attr)
64 64 return getattr(self.out, attr)
65 65
66 66 class channeledinput(object):
67 67 """
68 68 Read data from in_.
69 69
70 70 Requests for input are written to out in the following format:
71 71 channel identifier - 'I' for plain input, 'L' line based (1 byte)
72 72 how many bytes to send at most (unsigned int),
73 73
74 74 The client replies with:
75 75 data length (unsigned int), 0 meaning EOF
76 76 data
77 77 """
78 78
79 79 maxchunksize = 4 * 1024
80 80
81 81 def __init__(self, in_, out, channel):
82 82 self.in_ = in_
83 83 self.out = out
84 84 self.channel = channel
85 85
86 86 @property
87 87 def name(self):
88 88 return '<%c-channel>' % self.channel
89 89
90 90 def read(self, size=-1):
91 91 if size < 0:
92 92 # if we need to consume all the clients input, ask for 4k chunks
93 93 # so the pipe doesn't fill up risking a deadlock
94 94 size = self.maxchunksize
95 95 s = self._read(size, self.channel)
96 96 buf = s
97 97 while s:
98 98 s = self._read(size, self.channel)
99 99 buf += s
100 100
101 101 return buf
102 102 else:
103 103 return self._read(size, self.channel)
104 104
105 105 def _read(self, size, channel):
106 106 if not size:
107 107 return ''
108 108 assert size > 0
109 109
110 110 # tell the client we need at most size bytes
111 111 self.out.write(struct.pack('>cI', channel, size))
112 112 self.out.flush()
113 113
114 114 length = self.in_.read(4)
115 115 length = struct.unpack('>I', length)[0]
116 116 if not length:
117 117 return ''
118 118 else:
119 119 return self.in_.read(length)
120 120
121 121 def readline(self, size=-1):
122 122 if size < 0:
123 123 size = self.maxchunksize
124 124 s = self._read(size, 'L')
125 125 buf = s
126 126 # keep asking for more until there's either no more or
127 127 # we got a full line
128 128 while s and s[-1] != '\n':
129 129 s = self._read(size, 'L')
130 130 buf += s
131 131
132 132 return buf
133 133 else:
134 134 return self._read(size, 'L')
135 135
136 136 def __iter__(self):
137 137 return self
138 138
139 139 def next(self):
140 140 l = self.readline()
141 141 if not l:
142 142 raise StopIteration
143 143 return l
144 144
145 145 def __getattr__(self, attr):
146 146 if attr in ('isatty', 'fileno', 'tell', 'seek'):
147 147 raise AttributeError(attr)
148 148 return getattr(self.in_, attr)
149 149
150 150 class server(object):
151 151 """
152 152 Listens for commands on fin, runs them and writes the output on a channel
153 153 based stream to fout.
154 154 """
155 155 def __init__(self, ui, repo, fin, fout):
156 156 self.cwd = pycompat.getcwd()
157 157
158 158 # developer config: cmdserver.log
159 159 logpath = ui.config("cmdserver", "log")
160 160 if logpath:
161 161 global logfile
162 162 if logpath == '-':
163 163 # write log on a special 'd' (debug) channel
164 164 logfile = channeledoutput(fout, 'd')
165 165 else:
166 166 logfile = open(logpath, 'a')
167 167
168 168 if repo:
169 169 # the ui here is really the repo ui so take its baseui so we don't
170 170 # end up with its local configuration
171 171 self.ui = repo.baseui
172 172 self.repo = repo
173 173 self.repoui = repo.ui
174 174 else:
175 175 self.ui = ui
176 176 self.repo = self.repoui = None
177 177
178 178 self.cerr = channeledoutput(fout, 'e')
179 179 self.cout = channeledoutput(fout, 'o')
180 180 self.cin = channeledinput(fin, fout, 'I')
181 181 self.cresult = channeledoutput(fout, 'r')
182 182
183 183 self.client = fin
184 184
185 185 def cleanup(self):
186 186 """release and restore resources taken during server session"""
187 187
188 188 def _read(self, size):
189 189 if not size:
190 190 return ''
191 191
192 192 data = self.client.read(size)
193 193
194 194 # is the other end closed?
195 195 if not data:
196 196 raise EOFError
197 197
198 198 return data
199 199
200 200 def _readstr(self):
201 201 """read a string from the channel
202 202
203 203 format:
204 204 data length (uint32), data
205 205 """
206 206 length = struct.unpack('>I', self._read(4))[0]
207 207 if not length:
208 208 return ''
209 209 return self._read(length)
210 210
211 211 def _readlist(self):
212 212 """read a list of NULL separated strings from the channel"""
213 213 s = self._readstr()
214 214 if s:
215 215 return s.split('\0')
216 216 else:
217 217 return []
218 218
219 219 def runcommand(self):
220 220 """ reads a list of \0 terminated arguments, executes
221 221 and writes the return code to the result channel """
222 222 from . import dispatch # avoid cycle
223 223
224 224 args = self._readlist()
225 225
226 226 # copy the uis so changes (e.g. --config or --verbose) don't
227 227 # persist between requests
228 228 copiedui = self.ui.copy()
229 229 uis = [copiedui]
230 230 if self.repo:
231 231 self.repo.baseui = copiedui
232 232 # clone ui without using ui.copy because this is protected
233 233 repoui = self.repoui.__class__(self.repoui)
234 234 repoui.copy = copiedui.copy # redo copy protection
235 235 uis.append(repoui)
236 236 self.repo.ui = self.repo.dirstate._ui = repoui
237 237 self.repo.invalidateall()
238 238
239 239 for ui in uis:
240 240 ui.resetstate()
241 241 # any kind of interaction must use server channels, but chg may
242 242 # replace channels by fully functional tty files. so nontty is
243 243 # enforced only if cin is a channel.
244 244 if not util.safehasattr(self.cin, 'fileno'):
245 245 ui.setconfig('ui', 'nontty', 'true', 'commandserver')
246 246
247 247 req = dispatch.request(args[:], copiedui, self.repo, self.cin,
248 248 self.cout, self.cerr)
249 249
250 250 ret = (dispatch.dispatch(req) or 0) & 255 # might return None
251 251
252 252 # restore old cwd
253 253 if '--cwd' in args:
254 254 os.chdir(self.cwd)
255 255
256 256 self.cresult.write(struct.pack('>i', int(ret)))
257 257
258 258 def getencoding(self):
259 259 """ writes the current encoding to the result channel """
260 260 self.cresult.write(encoding.encoding)
261 261
262 262 def serveone(self):
263 263 cmd = self.client.readline()[:-1]
264 264 if cmd:
265 265 handler = self.capabilities.get(cmd)
266 266 if handler:
267 267 handler(self)
268 268 else:
269 269 # clients are expected to check what commands are supported by
270 270 # looking at the servers capabilities
271 271 raise error.Abort(_('unknown command %s') % cmd)
272 272
273 273 return cmd != ''
274 274
275 275 capabilities = {'runcommand': runcommand,
276 276 'getencoding': getencoding}
277 277
278 278 def serve(self):
279 279 hellomsg = 'capabilities: ' + ' '.join(sorted(self.capabilities))
280 280 hellomsg += '\n'
281 281 hellomsg += 'encoding: ' + encoding.encoding
282 282 hellomsg += '\n'
283 283 hellomsg += 'pid: %d' % util.getpid()
284 284 if util.safehasattr(os, 'getpgid'):
285 285 hellomsg += '\n'
286 286 hellomsg += 'pgid: %d' % os.getpgid(0)
287 287
288 288 # write the hello msg in -one- chunk
289 289 self.cout.write(hellomsg)
290 290
291 291 try:
292 292 while self.serveone():
293 293 pass
294 294 except EOFError:
295 295 # we'll get here if the client disconnected while we were reading
296 296 # its request
297 297 return 1
298 298
299 299 return 0
300 300
301 301 def _protectio(ui):
302 302 """ duplicates streams and redirect original to null if ui uses stdio """
303 303 ui.flush()
304 304 newfiles = []
305 305 nullfd = os.open(os.devnull, os.O_RDWR)
306 306 for f, sysf, mode in [(ui.fin, util.stdin, pycompat.sysstr('rb')),
307 307 (ui.fout, util.stdout, pycompat.sysstr('wb'))]:
308 308 if f is sysf:
309 309 newfd = os.dup(f.fileno())
310 310 os.dup2(nullfd, f.fileno())
311 311 f = os.fdopen(newfd, mode)
312 312 newfiles.append(f)
313 313 os.close(nullfd)
314 314 return tuple(newfiles)
315 315
316 316 def _restoreio(ui, fin, fout):
317 317 """ restores streams from duplicated ones """
318 318 ui.flush()
319 319 for f, uif in [(fin, ui.fin), (fout, ui.fout)]:
320 320 if f is not uif:
321 321 os.dup2(f.fileno(), uif.fileno())
322 322 f.close()
323 323
324 324 class pipeservice(object):
325 325 def __init__(self, ui, repo, opts):
326 326 self.ui = ui
327 327 self.repo = repo
328 328
329 329 def init(self):
330 330 pass
331 331
332 332 def run(self):
333 333 ui = self.ui
334 334 # redirect stdio to null device so that broken extensions or in-process
335 335 # hooks will never cause corruption of channel protocol.
336 336 fin, fout = _protectio(ui)
337 337 try:
338 338 sv = server(ui, self.repo, fin, fout)
339 339 return sv.serve()
340 340 finally:
341 341 sv.cleanup()
342 342 _restoreio(ui, fin, fout)
343 343
344 344 def _initworkerprocess():
345 345 # use a different process group from the master process, in order to:
346 346 # 1. make the current process group no longer "orphaned" (because the
347 347 # parent of this process is in a different process group while
348 348 # remains in a same session)
349 349 # according to POSIX 2.2.2.52, orphaned process group will ignore
350 350 # terminal-generated stop signals like SIGTSTP (Ctrl+Z), which will
351 351 # cause trouble for things like ncurses.
352 352 # 2. the client can use kill(-pgid, sig) to simulate terminal-generated
353 353 # SIGINT (Ctrl+C) and process-exit-generated SIGHUP. our child
354 354 # processes like ssh will be killed properly, without affecting
355 355 # unrelated processes.
356 356 os.setpgid(0, 0)
357 357 # change random state otherwise forked request handlers would have a
358 358 # same state inherited from parent.
359 359 random.seed()
360 360
361 361 def _serverequest(ui, repo, conn, createcmdserver):
362 362 fin = conn.makefile('rb')
363 363 fout = conn.makefile('wb')
364 364 sv = None
365 365 try:
366 366 sv = createcmdserver(repo, conn, fin, fout)
367 367 try:
368 368 sv.serve()
369 369 # handle exceptions that may be raised by command server. most of
370 370 # known exceptions are caught by dispatch.
371 371 except error.Abort as inst:
372 372 ui.warn(_('abort: %s\n') % inst)
373 373 except IOError as inst:
374 374 if inst.errno != errno.EPIPE:
375 375 raise
376 376 except KeyboardInterrupt:
377 377 pass
378 378 finally:
379 379 sv.cleanup()
380 380 except: # re-raises
381 381 # also write traceback to error channel. otherwise client cannot
382 382 # see it because it is written to server's stderr by default.
383 383 if sv:
384 384 cerr = sv.cerr
385 385 else:
386 386 cerr = channeledoutput(fout, 'e')
387 387 traceback.print_exc(file=cerr)
388 388 raise
389 389 finally:
390 390 fin.close()
391 391 try:
392 392 fout.close() # implicit flush() may cause another EPIPE
393 393 except IOError as inst:
394 394 if inst.errno != errno.EPIPE:
395 395 raise
396 396
397 397 class unixservicehandler(object):
398 398 """Set of pluggable operations for unix-mode services
399 399
400 400 Almost all methods except for createcmdserver() are called in the main
401 401 process. You can't pass mutable resource back from createcmdserver().
402 402 """
403 403
404 404 pollinterval = None
405 405
406 406 def __init__(self, ui):
407 407 self.ui = ui
408 408
409 409 def bindsocket(self, sock, address):
410 410 util.bindunixsocket(sock, address)
411 411 sock.listen(socket.SOMAXCONN)
412 412 self.ui.status(_('listening at %s\n') % address)
413 413 self.ui.flush() # avoid buffering of status message
414 414
415 415 def unlinksocket(self, address):
416 416 os.unlink(address)
417 417
418 418 def shouldexit(self):
419 419 """True if server should shut down; checked per pollinterval"""
420 420 return False
421 421
422 422 def newconnection(self):
423 423 """Called when main process notices new connection"""
424 424
425 425 def createcmdserver(self, repo, conn, fin, fout):
426 426 """Create new command server instance; called in the process that
427 427 serves for the current connection"""
428 428 return server(self.ui, repo, fin, fout)
429 429
430 430 class unixforkingservice(object):
431 431 """
432 432 Listens on unix domain socket and forks server per connection
433 433 """
434 434
435 435 def __init__(self, ui, repo, opts, handler=None):
436 436 self.ui = ui
437 437 self.repo = repo
438 438 self.address = opts['address']
439 439 if not util.safehasattr(socket, 'AF_UNIX'):
440 440 raise error.Abort(_('unsupported platform'))
441 441 if not self.address:
442 442 raise error.Abort(_('no socket path specified with --address'))
443 443 self._servicehandler = handler or unixservicehandler(ui)
444 444 self._sock = None
445 445 self._oldsigchldhandler = None
446 446 self._workerpids = set() # updated by signal handler; do not iterate
447 447 self._socketunlinked = None
448 448
449 449 def init(self):
450 450 self._sock = socket.socket(socket.AF_UNIX)
451 451 self._servicehandler.bindsocket(self._sock, self.address)
452 452 o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
453 453 self._oldsigchldhandler = o
454 454 self._socketunlinked = False
455 455
456 456 def _unlinksocket(self):
457 457 if not self._socketunlinked:
458 458 self._servicehandler.unlinksocket(self.address)
459 459 self._socketunlinked = True
460 460
461 461 def _cleanup(self):
462 462 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
463 463 self._sock.close()
464 464 self._unlinksocket()
465 465 # don't kill child processes as they have active clients, just wait
466 466 self._reapworkers(0)
467 467
468 468 def run(self):
469 469 try:
470 470 self._mainloop()
471 471 finally:
472 472 self._cleanup()
473 473
474 474 def _mainloop(self):
475 475 exiting = False
476 476 h = self._servicehandler
477 477 selector = selectors2.DefaultSelector()
478 478 selector.register(self._sock, selectors2.EVENT_READ)
479 479 while True:
480 480 if not exiting and h.shouldexit():
481 481 # clients can no longer connect() to the domain socket, so
482 482 # we stop queuing new requests.
483 483 # for requests that are queued (connect()-ed, but haven't been
484 484 # accept()-ed), handle them before exit. otherwise, clients
485 485 # waiting for recv() will receive ECONNRESET.
486 486 self._unlinksocket()
487 487 exiting = True
488 488 ready = selector.select(timeout=h.pollinterval)
489 489 if not ready:
490 490 # only exit if we completed all queued requests
491 491 if exiting:
492 492 break
493 493 continue
494 494 try:
495 495 conn, _addr = self._sock.accept()
496 496 except socket.error as inst:
497 497 if inst.args[0] == errno.EINTR:
498 498 continue
499 499 raise
500 500
501 501 pid = os.fork()
502 502 if pid:
503 503 try:
504 504 self.ui.debug('forked worker process (pid=%d)\n' % pid)
505 505 self._workerpids.add(pid)
506 506 h.newconnection()
507 507 finally:
508 508 conn.close() # release handle in parent process
509 509 else:
510 510 try:
511 511 self._runworker(conn)
512 512 conn.close()
513 513 os._exit(0)
514 514 except: # never return, hence no re-raises
515 515 try:
516 516 self.ui.traceback(force=True)
517 517 finally:
518 518 os._exit(255)
519 519 selector.close()
520 520
521 521 def _sigchldhandler(self, signal, frame):
522 522 self._reapworkers(os.WNOHANG)
523 523
524 524 def _reapworkers(self, options):
525 525 while self._workerpids:
526 526 try:
527 527 pid, _status = os.waitpid(-1, options)
528 528 except OSError as inst:
529 529 if inst.errno == errno.EINTR:
530 530 continue
531 531 if inst.errno != errno.ECHILD:
532 532 raise
533 533 # no child processes at all (reaped by other waitpid()?)
534 534 self._workerpids.clear()
535 535 return
536 536 if pid == 0:
537 537 # no waitable child processes
538 538 return
539 539 self.ui.debug('worker process exited (pid=%d)\n' % pid)
540 540 self._workerpids.discard(pid)
541 541
542 542 def _runworker(self, conn):
543 543 signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
544 544 _initworkerprocess()
545 545 h = self._servicehandler
546 546 try:
547 547 _serverequest(self.ui, self.repo, conn, h.createcmdserver)
548 548 finally:
549 549 gc.collect() # trigger __del__ since worker process uses os._exit
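
For context on the channel protocol implemented by channeledoutput and channeledinput above: every message on the server's output pipe is a one-byte channel identifier, a big-endian unsigned 32-bit length, and the payload. A minimal sketch of a client-side frame reader (a hypothetical helper, not part of Mercurial) could look like this:

import io
import struct

def read_frame(pipe):
    # One frame = channel byte + uint32 big-endian length + payload,
    # matching the struct.pack('>cI', ...) calls in channeledoutput.write().
    header = pipe.read(5)
    if len(header) < 5:
        return None  # pipe closed
    channel, length = struct.unpack('>cI', header)
    return channel, pipe.read(length)

# Exercised against an in-memory buffer standing in for the server's stdout:
buf = io.BytesIO(struct.pack('>cI', b'o', 5) + b'hello')
print(read_frame(buf))  # (b'o', b'hello')
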
@@ -1,745 +1,743 @@
1 1 """ Back-ported, durable, and portable selectors """
2 2
3 3 # MIT License
4 4 #
5 5 # Copyright (c) 2017 Seth Michael Larson
6 6 #
7 7 # Permission is hereby granted, free of charge, to any person obtaining a copy
8 8 # of this software and associated documentation files (the "Software"), to deal
9 9 # in the Software without restriction, including without limitation the rights
10 10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 11 # copies of the Software, and to permit persons to whom the Software is
12 12 # furnished to do so, subject to the following conditions:
13 13 #
14 14 # The above copyright notice and this permission notice shall be included in all
15 15 # copies or substantial portions of the Software.
16 16 #
17 17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 23 # SOFTWARE.
24 24
25 # no-check-code
26
27 25 from __future__ import absolute_import
28 26
29 27 import collections
30 28 import errno
31 29 import math
32 30 import select
33 31 import socket
34 32 import sys
35 33 import time
36 34
37 from . import pycompat
35 from .. import pycompat
38 36
39 37 namedtuple = collections.namedtuple
40 38 Mapping = collections.Mapping
41 39
42 40 try:
43 41 monotonic = time.monotonic
44 42 except AttributeError:
45 43 monotonic = time.time
46 44
47 45 __author__ = 'Seth Michael Larson'
48 46 __email__ = 'sethmichaellarson@protonmail.com'
49 47 __version__ = '2.0.0'
50 48 __license__ = 'MIT'
51 49 __url__ = 'https://www.github.com/SethMichaelLarson/selectors2'
52 50
53 51 __all__ = ['EVENT_READ',
54 52 'EVENT_WRITE',
55 53 'SelectorKey',
56 54 'DefaultSelector',
57 55 'BaseSelector']
58 56
59 57 EVENT_READ = (1 << 0)
60 58 EVENT_WRITE = (1 << 1)
61 59 _DEFAULT_SELECTOR = None
62 60 _SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
63 61 _ERROR_TYPES = (OSError, IOError, socket.error)
64 62
65 63
66 64 SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
67 65
68 66
69 67 class _SelectorMapping(Mapping):
70 68 """ Mapping of file objects to selector keys """
71 69
72 70 def __init__(self, selector):
73 71 self._selector = selector
74 72
75 73 def __len__(self):
76 74 return len(self._selector._fd_to_key)
77 75
78 76 def __getitem__(self, fileobj):
79 77 try:
80 78 fd = self._selector._fileobj_lookup(fileobj)
81 79 return self._selector._fd_to_key[fd]
82 80 except KeyError:
83 81 raise KeyError("{0!r} is not registered.".format(fileobj))
84 82
85 83 def __iter__(self):
86 84 return iter(self._selector._fd_to_key)
87 85
88 86
89 87 def _fileobj_to_fd(fileobj):
90 88 """ Return a file descriptor from a file object. If
91 89 given an integer will simply return that integer back. """
92 90 if isinstance(fileobj, int):
93 91 fd = fileobj
94 92 else:
95 93 try:
96 94 fd = int(fileobj.fileno())
97 95 except (AttributeError, TypeError, ValueError):
98 96 raise ValueError("Invalid file object: {0!r}".format(fileobj))
99 97 if fd < 0:
100 98 raise ValueError("Invalid file descriptor: {0}".format(fd))
101 99 return fd
102 100
103 101
104 102 class BaseSelector(object):
105 103 """ Abstract Selector class
106 104
107 105 A selector supports registering file objects to be monitored
108 106 for specific I/O events.
109 107
110 108 A file object is a file descriptor or any object with a
111 109 `fileno()` method. An arbitrary object can be attached to the
112 110 file object which can be used for example to store context info,
113 111 a callback, etc.
114 112
115 113 A selector can use various implementations (select(), poll(), epoll(),
116 114 and kqueue()) depending on the platform. The 'DefaultSelector' class uses
117 115 the most efficient implementation for the current platform.
118 116 """
119 117 def __init__(self):
120 118 # Maps file descriptors to keys.
121 119 self._fd_to_key = {}
122 120
123 121 # Read-only mapping returned by get_map()
124 122 self._map = _SelectorMapping(self)
125 123
126 124 def _fileobj_lookup(self, fileobj):
127 125 """ Return a file descriptor from a file object.
128 126 This wraps _fileobj_to_fd() to do an exhaustive
129 127 search in case the object is invalid but we still
130 128 have it in our map. Used by unregister() so we can
131 129 unregister an object that was previously registered
132 130 even if it is closed. It is also used by _SelectorMapping
133 131 """
134 132 try:
135 133 return _fileobj_to_fd(fileobj)
136 134 except ValueError:
137 135
138 136 # Search through all our mapped keys.
139 137 for key in self._fd_to_key.values():
140 138 if key.fileobj is fileobj:
141 139 return key.fd
142 140
143 141 # Raise ValueError after all.
144 142 raise
145 143
146 144 def register(self, fileobj, events, data=None):
147 145 """ Register a file object for a set of events to monitor. """
148 146 if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
149 147 raise ValueError("Invalid events: {0!r}".format(events))
150 148
151 149 key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
152 150
153 151 if key.fd in self._fd_to_key:
154 152 raise KeyError("{0!r} (FD {1}) is already registered"
155 153 .format(fileobj, key.fd))
156 154
157 155 self._fd_to_key[key.fd] = key
158 156 return key
159 157
160 158 def unregister(self, fileobj):
161 159 """ Unregister a file object from being monitored. """
162 160 try:
163 161 key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
164 162 except KeyError:
165 163 raise KeyError("{0!r} is not registered".format(fileobj))
166 164
167 165 # Getting the fileno of a closed socket on Windows errors with EBADF.
168 166 except socket.error as err:
169 167 if err.errno != errno.EBADF:
170 168 raise
171 169 else:
172 170 for key in self._fd_to_key.values():
173 171 if key.fileobj is fileobj:
174 172 self._fd_to_key.pop(key.fd)
175 173 break
176 174 else:
177 175 raise KeyError("{0!r} is not registered".format(fileobj))
178 176 return key
179 177
180 178 def modify(self, fileobj, events, data=None):
181 179 """ Change a registered file object monitored events and data. """
182 180 # NOTE: Some subclasses optimize this operation even further.
183 181 try:
184 182 key = self._fd_to_key[self._fileobj_lookup(fileobj)]
185 183 except KeyError:
186 184 raise KeyError("{0!r} is not registered".format(fileobj))
187 185
188 186 if events != key.events:
189 187 self.unregister(fileobj)
190 188 key = self.register(fileobj, events, data)
191 189
192 190 elif data != key.data:
193 191 # Use a shortcut to update the data.
194 192 key = key._replace(data=data)
195 193 self._fd_to_key[key.fd] = key
196 194
197 195 return key
198 196
199 197 def select(self, timeout=None):
200 198 """ Perform the actual selection until some monitored file objects
201 199 are ready or the timeout expires. """
202 200 raise NotImplementedError()
203 201
204 202 def close(self):
205 203 """ Close the selector. This must be called to ensure that all
206 204 underlying resources are freed. """
207 205 self._fd_to_key.clear()
208 206 self._map = None
209 207
210 208 def get_key(self, fileobj):
211 209 """ Return the key associated with a registered file object. """
212 210 mapping = self.get_map()
213 211 if mapping is None:
214 212 raise RuntimeError("Selector is closed")
215 213 try:
216 214 return mapping[fileobj]
217 215 except KeyError:
218 216 raise KeyError("{0!r} is not registered".format(fileobj))
219 217
220 218 def get_map(self):
221 219 """ Return a mapping of file objects to selector keys """
222 220 return self._map
223 221
224 222 def _key_from_fd(self, fd):
225 223 """ Return the key associated to a given file descriptor
226 224 Return None if it is not found. """
227 225 try:
228 226 return self._fd_to_key[fd]
229 227 except KeyError:
230 228 return None
231 229
232 230 def __enter__(self):
233 231 return self
234 232
235 233 def __exit__(self, *_):
236 234 self.close()
237 235
238 236
239 237 # Almost all platforms have select.select()
240 238 if hasattr(select, "select"):
241 239 class SelectSelector(BaseSelector):
242 240 """ Select-based selector. """
243 241 def __init__(self):
244 242 super(SelectSelector, self).__init__()
245 243 self._readers = set()
246 244 self._writers = set()
247 245
248 246 def register(self, fileobj, events, data=None):
249 247 key = super(SelectSelector, self).register(fileobj, events, data)
250 248 if events & EVENT_READ:
251 249 self._readers.add(key.fd)
252 250 if events & EVENT_WRITE:
253 251 self._writers.add(key.fd)
254 252 return key
255 253
256 254 def unregister(self, fileobj):
257 255 key = super(SelectSelector, self).unregister(fileobj)
258 256 self._readers.discard(key.fd)
259 257 self._writers.discard(key.fd)
260 258 return key
261 259
262 260 def select(self, timeout=None):
263 261 # Selecting on empty lists on Windows errors out.
264 262 if not len(self._readers) and not len(self._writers):
265 263 return []
266 264
267 265 timeout = None if timeout is None else max(timeout, 0.0)
268 266 ready = []
269 267 r, w, _ = _syscall_wrapper(self._wrap_select, True, self._readers,
270 268 self._writers, timeout)
271 269 r = set(r)
272 270 w = set(w)
273 271 for fd in r | w:
274 272 events = 0
275 273 if fd in r:
276 274 events |= EVENT_READ
277 275 if fd in w:
278 276 events |= EVENT_WRITE
279 277
280 278 key = self._key_from_fd(fd)
281 279 if key:
282 280 ready.append((key, events & key.events))
283 281 return ready
284 282
285 283 def _wrap_select(self, r, w, timeout=None):
286 284 """ Wrapper for select.select because timeout is a positional arg """
287 285 return select.select(r, w, [], timeout)
288 286
289 287 __all__.append('SelectSelector')
290 288
291 289 # Jython has a different implementation of .fileno() for socket objects.
292 290 if pycompat.isjython:
293 291 class _JythonSelectorMapping(object):
294 292 """ This is an implementation of _SelectorMapping that is built
295 293 for use specifically with Jython, which does not provide a hashable
296 294 value from socket.socket.fileno(). """
297 295
298 296 def __init__(self, selector):
299 297 assert isinstance(selector, JythonSelectSelector)
300 298 self._selector = selector
301 299
302 300 def __len__(self):
303 301 return len(self._selector._sockets)
304 302
305 303 def __getitem__(self, fileobj):
306 304 for sock, key in self._selector._sockets:
307 305 if sock is fileobj:
308 306 return key
309 307 else:
310 308 raise KeyError("{0!r} is not registered.".format(fileobj))
311 309
312 310 class JythonSelectSelector(SelectSelector):
313 311 """ This is an implementation of SelectSelector that is for Jython
314 312 which works around that Jython's socket.socket.fileno() does not
315 313 return an integer fd value. All SelectorKey.fd will be equal to -1
316 314 and should not be used. This instead uses object id to compare fileobj
317 315 and will only use select.select as it's the only selector that allows
318 316 directly passing in socket objects rather than registering fds.
319 317 See: http://bugs.jython.org/issue1678
320 318 https://wiki.python.org/jython/NewSocketModule#socket.fileno.28.29_does_not_return_an_integer
321 319 """
322 320
323 321 def __init__(self):
324 322 super(JythonSelectSelector, self).__init__()
325 323
326 324 self._sockets = [] # Uses a list of tuples instead of dictionary.
327 325 self._map = _JythonSelectorMapping(self)
328 326 self._readers = []
329 327 self._writers = []
330 328
331 329 # Jython has a select.cpython_compatible_select function in older versions.
332 330 self._select_func = getattr(select, 'cpython_compatible_select', select.select)
333 331
334 332 def register(self, fileobj, events, data=None):
335 333 for sock, _ in self._sockets:
336 334 if sock is fileobj:
337 335 raise KeyError("{0!r} is already registered"
338 336 .format(fileobj, sock))
339 337
340 338 key = SelectorKey(fileobj, -1, events, data)
341 339 self._sockets.append((fileobj, key))
342 340
343 341 if events & EVENT_READ:
344 342 self._readers.append(fileobj)
345 343 if events & EVENT_WRITE:
346 344 self._writers.append(fileobj)
347 345 return key
348 346
349 347 def unregister(self, fileobj):
350 348 for i, (sock, key) in enumerate(self._sockets):
351 349 if sock is fileobj:
352 350 break
353 351 else:
354 352 raise KeyError("{0!r} is not registered.".format(fileobj))
355 353
356 354 if key.events & EVENT_READ:
357 355 self._readers.remove(fileobj)
358 356 if key.events & EVENT_WRITE:
359 357 self._writers.remove(fileobj)
360 358
361 359 del self._sockets[i]
362 360 return key
363 361
364 362 def _wrap_select(self, r, w, timeout=None):
365 363 """ Wrapper for select.select because timeout is a positional arg """
366 364 return self._select_func(r, w, [], timeout)
367 365
368 366 __all__.append('JythonSelectSelector')
369 367 SelectSelector = JythonSelectSelector # Override so the wrong selector isn't used.
370 368
371 369
372 370 if hasattr(select, "poll"):
373 371 class PollSelector(BaseSelector):
374 372 """ Poll-based selector """
375 373 def __init__(self):
376 374 super(PollSelector, self).__init__()
377 375 self._poll = select.poll()
378 376
379 377 def register(self, fileobj, events, data=None):
380 378 key = super(PollSelector, self).register(fileobj, events, data)
381 379 event_mask = 0
382 380 if events & EVENT_READ:
383 381 event_mask |= select.POLLIN
384 382 if events & EVENT_WRITE:
385 383 event_mask |= select.POLLOUT
386 384 self._poll.register(key.fd, event_mask)
387 385 return key
388 386
389 387 def unregister(self, fileobj):
390 388 key = super(PollSelector, self).unregister(fileobj)
391 389 self._poll.unregister(key.fd)
392 390 return key
393 391
394 392 def _wrap_poll(self, timeout=None):
395 393 """ Wrapper function for select.poll.poll() so that
396 394 _syscall_wrapper can work with only seconds. """
397 395 if timeout is not None:
398 396 if timeout <= 0:
399 397 timeout = 0
400 398 else:
401 399 # select.poll.poll() has a resolution of 1 millisecond,
402 400 # round away from zero to wait *at least* timeout seconds.
403 401 timeout = math.ceil(timeout * 1000)
404 402
405 403 result = self._poll.poll(timeout)
406 404 return result
407 405
408 406 def select(self, timeout=None):
409 407 ready = []
410 408 fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
411 409 for fd, event_mask in fd_events:
412 410 events = 0
413 411 if event_mask & ~select.POLLIN:
414 412 events |= EVENT_WRITE
415 413 if event_mask & ~select.POLLOUT:
416 414 events |= EVENT_READ
417 415
418 416 key = self._key_from_fd(fd)
419 417 if key:
420 418 ready.append((key, events & key.events))
421 419
422 420 return ready
423 421
424 422 __all__.append('PollSelector')
425 423
426 424 if hasattr(select, "epoll"):
427 425 class EpollSelector(BaseSelector):
428 426 """ Epoll-based selector """
429 427 def __init__(self):
430 428 super(EpollSelector, self).__init__()
431 429 self._epoll = select.epoll()
432 430
433 431 def fileno(self):
434 432 return self._epoll.fileno()
435 433
436 434 def register(self, fileobj, events, data=None):
437 435 key = super(EpollSelector, self).register(fileobj, events, data)
438 436 events_mask = 0
439 437 if events & EVENT_READ:
440 438 events_mask |= select.EPOLLIN
441 439 if events & EVENT_WRITE:
442 440 events_mask |= select.EPOLLOUT
443 441 _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
444 442 return key
445 443
446 444 def unregister(self, fileobj):
447 445 key = super(EpollSelector, self).unregister(fileobj)
448 446 try:
449 447 _syscall_wrapper(self._epoll.unregister, False, key.fd)
450 448 except _ERROR_TYPES:
451 449 # This can occur when the fd was closed since registry.
452 450 pass
453 451 return key
454 452
455 453 def select(self, timeout=None):
456 454 if timeout is not None:
457 455 if timeout <= 0:
458 456 timeout = 0.0
459 457 else:
460 458 # select.epoll.poll() has a resolution of 1 millisecond
461 459 # but luckily takes seconds so we don't need a wrapper
462 460 # like PollSelector. Just for better rounding.
463 461 timeout = math.ceil(timeout * 1000) * 0.001
464 462 timeout = float(timeout)
465 463 else:
466 464 timeout = -1.0 # epoll.poll() must have a float.
467 465
468 466 # We always want at least 1 to ensure that select can be called
469 467 # with no file descriptors registered. Otherwise will fail.
470 468 max_events = max(len(self._fd_to_key), 1)
471 469
472 470 ready = []
473 471 fd_events = _syscall_wrapper(self._epoll.poll, True,
474 472 timeout=timeout,
475 473 maxevents=max_events)
476 474 for fd, event_mask in fd_events:
477 475 events = 0
478 476 if event_mask & ~select.EPOLLIN:
479 477 events |= EVENT_WRITE
480 478 if event_mask & ~select.EPOLLOUT:
481 479 events |= EVENT_READ
482 480
483 481 key = self._key_from_fd(fd)
484 482 if key:
485 483 ready.append((key, events & key.events))
486 484 return ready
487 485
488 486 def close(self):
489 487 self._epoll.close()
490 488 super(EpollSelector, self).close()
491 489
492 490 __all__.append('EpollSelector')
493 491
494 492
495 493 if hasattr(select, "devpoll"):
496 494 class DevpollSelector(BaseSelector):
497 495 """Solaris /dev/poll selector."""
498 496
499 497 def __init__(self):
500 498 super(DevpollSelector, self).__init__()
501 499 self._devpoll = select.devpoll()
502 500
503 501 def fileno(self):
504 502 return self._devpoll.fileno()
505 503
506 504 def register(self, fileobj, events, data=None):
507 505 key = super(DevpollSelector, self).register(fileobj, events, data)
508 506 poll_events = 0
509 507 if events & EVENT_READ:
510 508 poll_events |= select.POLLIN
511 509 if events & EVENT_WRITE:
512 510 poll_events |= select.POLLOUT
513 511 self._devpoll.register(key.fd, poll_events)
514 512 return key
515 513
516 514 def unregister(self, fileobj):
517 515 key = super(DevpollSelector, self).unregister(fileobj)
518 516 self._devpoll.unregister(key.fd)
519 517 return key
520 518
521 519 def _wrap_poll(self, timeout=None):
522 520 """ Wrapper function for select.poll.poll() so that
523 521 _syscall_wrapper can work with only seconds. """
524 522 if timeout is not None:
525 523 if timeout <= 0:
526 524 timeout = 0
527 525 else:
528 526 # select.devpoll.poll() has a resolution of 1 millisecond,
529 527 # round away from zero to wait *at least* timeout seconds.
530 528 timeout = math.ceil(timeout * 1000)
531 529
532 530 result = self._devpoll.poll(timeout)
533 531 return result
534 532
535 533 def select(self, timeout=None):
536 534 ready = []
537 535 fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
538 536 for fd, event_mask in fd_events:
539 537 events = 0
540 538 if event_mask & ~select.POLLIN:
541 539 events |= EVENT_WRITE
542 540 if event_mask & ~select.POLLOUT:
543 541 events |= EVENT_READ
544 542
545 543 key = self._key_from_fd(fd)
546 544 if key:
547 545 ready.append((key, events & key.events))
548 546
549 547 return ready
550 548
551 549 def close(self):
552 550 self._devpoll.close()
553 551 super(DevpollSelector, self).close()
554 552
555 553 __all__.append('DevpollSelector')
556 554
557 555
558 556 if hasattr(select, "kqueue"):
559 557 class KqueueSelector(BaseSelector):
560 558 """ Kqueue / Kevent-based selector """
561 559 def __init__(self):
562 560 super(KqueueSelector, self).__init__()
563 561 self._kqueue = select.kqueue()
564 562
565 563 def fileno(self):
566 564 return self._kqueue.fileno()
567 565
568 566 def register(self, fileobj, events, data=None):
569 567 key = super(KqueueSelector, self).register(fileobj, events, data)
570 568 if events & EVENT_READ:
571 569 kevent = select.kevent(key.fd,
572 570 select.KQ_FILTER_READ,
573 571 select.KQ_EV_ADD)
574 572
575 573 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
576 574
577 575 if events & EVENT_WRITE:
578 576 kevent = select.kevent(key.fd,
579 577 select.KQ_FILTER_WRITE,
580 578 select.KQ_EV_ADD)
581 579
582 580 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
583 581
584 582 return key
585 583
586 584 def unregister(self, fileobj):
587 585 key = super(KqueueSelector, self).unregister(fileobj)
588 586 if key.events & EVENT_READ:
589 587 kevent = select.kevent(key.fd,
590 588 select.KQ_FILTER_READ,
591 589 select.KQ_EV_DELETE)
592 590 try:
593 591 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
594 592 except _ERROR_TYPES:
595 593 pass
596 594 if key.events & EVENT_WRITE:
597 595 kevent = select.kevent(key.fd,
598 596 select.KQ_FILTER_WRITE,
599 597 select.KQ_EV_DELETE)
600 598 try:
601 599 _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
602 600 except _ERROR_TYPES:
603 601 pass
604 602
605 603 return key
606 604
607 605 def select(self, timeout=None):
608 606 if timeout is not None:
609 607 timeout = max(timeout, 0)
610 608
611 609 max_events = len(self._fd_to_key) * 2
612 610 ready_fds = {}
613 611
614 612 kevent_list = _syscall_wrapper(self._kqueue.control, True,
615 613 None, max_events, timeout)
616 614
617 615 for kevent in kevent_list:
618 616 fd = kevent.ident
619 617 event_mask = kevent.filter
620 618 events = 0
621 619 if event_mask == select.KQ_FILTER_READ:
622 620 events |= EVENT_READ
623 621 if event_mask == select.KQ_FILTER_WRITE:
624 622 events |= EVENT_WRITE
625 623
626 624 key = self._key_from_fd(fd)
627 625 if key:
628 626 if key.fd not in ready_fds:
629 627 ready_fds[key.fd] = (key, events & key.events)
630 628 else:
631 629 old_events = ready_fds[key.fd][1]
632 630 ready_fds[key.fd] = (key, (events | old_events) & key.events)
633 631
634 632 return list(ready_fds.values())
635 633
636 634 def close(self):
637 635 self._kqueue.close()
638 636 super(KqueueSelector, self).close()
639 637
640 638 __all__.append('KqueueSelector')
641 639
642 640
643 641 def _can_allocate(struct):
644 642 """ Checks that select structs can be allocated by the underlying
645 643 operating system, not just advertised by the select module. We don't
646 644 check select() because we'll be hopeful that most platforms that
647 645 don't have it available will not advertise it. (ie: GAE) """
648 646 try:
649 647 # select.poll() objects won't fail until used.
650 648 if struct == 'poll':
651 649 p = select.poll()
652 650 p.poll(0)
653 651
654 652 # All others will fail on allocation.
655 653 else:
656 654 getattr(select, struct)().close()
657 655 return True
658 656 except (OSError, AttributeError):
659 657 return False
660 658
661 659
662 660 # Python 3.5 uses a more direct route to wrap system calls to increase speed.
663 661 if sys.version_info >= (3, 5):
664 662 def _syscall_wrapper(func, _, *args, **kwargs):
665 663 """ This is the short-circuit version of the below logic
666 664 because in Python 3.5+ all selectors restart system calls. """
667 665 return func(*args, **kwargs)
668 666 else:
669 667 def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
670 668 """ Wrapper function for syscalls that could fail due to EINTR.
671 669 All functions should be retried if there is time left in the timeout
672 670 in accordance with PEP 475. """
673 671 timeout = kwargs.get("timeout", None)
674 672 if timeout is None:
675 673 expires = None
676 674 recalc_timeout = False
677 675 else:
678 676 timeout = float(timeout)
679 677 if timeout < 0.0: # Timeout less than 0 treated as no timeout.
680 678 expires = None
681 679 else:
682 680 expires = monotonic() + timeout
683 681
684 682 args = list(args)
685 683 if recalc_timeout and "timeout" not in kwargs:
686 684 raise ValueError(
687 685 "Timeout must be in args or kwargs to be recalculated")
688 686
689 687 result = _SYSCALL_SENTINEL
690 688 while result is _SYSCALL_SENTINEL:
691 689 try:
692 690 result = func(*args, **kwargs)
693 691 # OSError is thrown by select.select
694 692 # IOError is thrown by select.epoll.poll
695 693 # select.error is thrown by select.poll.poll
696 694 # Aren't we thankful for Python 3.x rework for exceptions?
697 695 except (OSError, IOError, select.error) as e:
698 696 # select.error wasn't a subclass of OSError in the past.
699 697 errcode = None
700 698 if hasattr(e, "errno"):
701 699 errcode = e.errno
702 700 elif hasattr(e, "args"):
703 701 errcode = e.args[0]
704 702
705 703 # Also test for the Windows equivalent of EINTR.
706 704 is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
707 705 errcode == errno.WSAEINTR))
708 706
709 707 if is_interrupt:
710 708 if expires is not None:
711 709 current_time = monotonic()
712 710 if current_time > expires:
713 711 raise OSError(errno=errno.ETIMEDOUT)
714 712 if recalc_timeout:
715 713 if "timeout" in kwargs:
716 714 kwargs["timeout"] = expires - current_time
717 715 continue
718 716 raise
719 717 return result
720 718
721 719
722 720 # Choose the best implementation, roughly:
723 721 # kqueue == devpoll == epoll > poll > select
724 722 # select() also can't accept a FD > FD_SETSIZE (usually around 1024)
725 723 def DefaultSelector():
726 724 """ This function serves as a first call for DefaultSelector to
727 725 detect if the select module is being monkey-patched incorrectly
728 726 by eventlet, greenlet, and preserve proper behavior. """
729 727 global _DEFAULT_SELECTOR
730 728 if _DEFAULT_SELECTOR is None:
731 729 if pycompat.isjython:
732 730 _DEFAULT_SELECTOR = JythonSelectSelector
733 731 elif _can_allocate('kqueue'):
734 732 _DEFAULT_SELECTOR = KqueueSelector
735 733 elif _can_allocate('devpoll'):
736 734 _DEFAULT_SELECTOR = DevpollSelector
737 735 elif _can_allocate('epoll'):
738 736 _DEFAULT_SELECTOR = EpollSelector
739 737 elif _can_allocate('poll'):
740 738 _DEFAULT_SELECTOR = PollSelector
741 739 elif hasattr(select, 'select'):
742 740 _DEFAULT_SELECTOR = SelectSelector
743 741 else: # Platform-specific: AppEngine
744 742 raise RuntimeError('Platform does not have a selector.')
745 743 return _DEFAULT_SELECTOR()
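
The vendored module above mirrors the interface of the Python 3 stdlib selectors module, which is why _mainloop() in commandserver.py keeps using it unchanged after the move; selectors2 is simply imported via the thirdparty subpackage instead of directly from the mercurial package. A minimal sketch of that interface, with the stdlib selectors module standing in for the vendored copy:

import socket
import selectors  # stand-in for mercurial.thirdparty.selectors2

a, b = socket.socketpair()
sel = selectors.DefaultSelector()
sel.register(a, selectors.EVENT_READ)

b.sendall(b'ping')
for key, events in sel.select(timeout=1.0):
    print(key.fileobj.recv(4))  # b'ping'

sel.close()
a.close()
b.close()
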
@@ -1,56 +1,55 @@
1 1 #require test-repo
2 2
3 3 $ . "$TESTDIR/helpers-testrepo.sh"
4 4 $ check_code="$TESTDIR"/../contrib/check-code.py
5 5 $ cd "$TESTDIR"/..
6 6
7 7 New errors are not allowed. Warnings are strongly discouraged.
8 8 (The writing "no-che?k-code" is for not skipping this file when checking.)
9 9
10 10 $ testrepohg locate \
11 11 > -X contrib/python-zstandard \
12 12 > -X hgext/fsmonitor/pywatchman \
13 13 > -X mercurial/thirdparty \
14 14 > | sed 's-\\-/-g' | "$check_code" --warnings --per-file=0 - || false
15 15 Skipping i18n/polib.py it has no-che?k-code (glob)
16 16 Skipping mercurial/httpclient/__init__.py it has no-che?k-code (glob)
17 17 Skipping mercurial/httpclient/_readers.py it has no-che?k-code (glob)
18 Skipping mercurial/selectors2.py it has no-che?k-code (glob)
19 18 Skipping mercurial/statprof.py it has no-che?k-code (glob)
20 19 Skipping tests/badserverext.py it has no-che?k-code (glob)
21 20
22 21 @commands in debugcommands.py should be in alphabetical order.
23 22
24 23 >>> import re
25 24 >>> commands = []
26 25 >>> with open('mercurial/debugcommands.py', 'rb') as fh:
27 26 ... for line in fh:
28 27 ... m = re.match("^@command\('([a-z]+)", line)
29 28 ... if m:
30 29 ... commands.append(m.group(1))
31 30 >>> scommands = list(sorted(commands))
32 31 >>> for i, command in enumerate(scommands):
33 32 ... if command != commands[i]:
34 33 ... print('commands in debugcommands.py not sorted; first differing '
35 34 ... 'command is %s; expected %s' % (commands[i], command))
36 35 ... break
37 36
38 37 Prevent adding new files in the root directory accidentally.
39 38
40 39 $ testrepohg files 'glob:*'
41 40 .arcconfig
42 41 .clang-format
43 42 .editorconfig
44 43 .hgignore
45 44 .hgsigs
46 45 .hgtags
47 46 .jshintrc
48 47 CONTRIBUTING
49 48 CONTRIBUTORS
50 49 COPYING
51 50 Makefile
52 51 README.rst
53 52 hg
54 53 hgeditor
55 54 hgweb.cgi
56 55 setup.py