##// END OF EJS Templates
Merge pull request #5759 from minrk/travis-3.4...
Thomas Kluyver -
r16553:51fb84bb merge
parent child Browse files
Show More
@@ -1,20 +1,26 b''
1 1 # http://travis-ci.org/#!/ipython/ipython
2 2 language: python
3 3 python:
4 - 3.4
4 5 - 2.7
5 6 - 3.3
6 7 env:
7 - GROUP=
8 8 - GROUP=js
9 - GROUP=
9 10 before_install:
10 11 # workaround for https://github.com/travis-ci/travis-cookbooks/issues/155
11 12 - sudo rm -rf /dev/shm && sudo ln -s /run/shm /dev/shm
12 13 # Pierre Carrier's PPA for PhantomJS and CasperJS
13 14 - time sudo add-apt-repository -y ppa:pcarrier/ppa
14 15 - time sudo apt-get update
15 16 - time sudo apt-get install pandoc casperjs nodejs libzmq3-dev
16 17 - time pip install -f https://nipy.bic.berkeley.edu/wheelhouse/travis jinja2 sphinx pygments tornado requests mock pyzmq
17 18 install:
18 19 - time python setup.py install -q
19 20 script:
20 21 - cd /tmp && iptest $GROUP
22
23 matrix:
24 exclude:
25 - python: 3.3
26 env: GROUP=js
@@ -1,195 +1,198 b''
1 1 """Tornado handlers for the notebook.
2 2
3 3 Authors:
4 4
5 5 * Brian Granger
6 6 """
7 7
8 8 #-----------------------------------------------------------------------------
9 9 # Copyright (C) 2008-2011 The IPython Development Team
10 10 #
11 11 # Distributed under the terms of the BSD License. The full license is in
12 12 # the file COPYING, distributed as part of this software.
13 13 #-----------------------------------------------------------------------------
14 14
15 15 #-----------------------------------------------------------------------------
16 16 # Imports
17 17 #-----------------------------------------------------------------------------
18 18
19 19 import logging
20 20 from tornado import web
21 21
22 22 from zmq.utils import jsonapi
23 23
24 24 from IPython.utils.jsonutil import date_default
25 25 from IPython.html.utils import url_path_join, url_escape
26 26
27 27 from ...base.handlers import IPythonHandler, json_errors
28 28 from ...base.zmqhandlers import AuthenticatedZMQStreamHandler
29 29
30 30 #-----------------------------------------------------------------------------
31 31 # Kernel handlers
32 32 #-----------------------------------------------------------------------------
33 33
34 34
35 35 class MainKernelHandler(IPythonHandler):
36 36
37 37 @web.authenticated
38 38 @json_errors
39 39 def get(self):
40 40 km = self.kernel_manager
41 41 self.finish(jsonapi.dumps(km.list_kernels()))
42 42
43 43 @web.authenticated
44 44 @json_errors
45 45 def post(self):
46 46 km = self.kernel_manager
47 47 kernel_id = km.start_kernel()
48 48 model = km.kernel_model(kernel_id)
49 49 location = url_path_join(self.base_url, 'api', 'kernels', kernel_id)
50 50 self.set_header('Location', url_escape(location))
51 51 self.set_status(201)
52 52 self.finish(jsonapi.dumps(model))
53 53
54 54
55 55 class KernelHandler(IPythonHandler):
56 56
57 57 SUPPORTED_METHODS = ('DELETE', 'GET')
58 58
59 59 @web.authenticated
60 60 @json_errors
61 61 def get(self, kernel_id):
62 62 km = self.kernel_manager
63 63 km._check_kernel_id(kernel_id)
64 64 model = km.kernel_model(kernel_id)
65 65 self.finish(jsonapi.dumps(model))
66 66
67 67 @web.authenticated
68 68 @json_errors
69 69 def delete(self, kernel_id):
70 70 km = self.kernel_manager
71 71 km.shutdown_kernel(kernel_id)
72 72 self.set_status(204)
73 73 self.finish()
74 74
75 75
76 76 class KernelActionHandler(IPythonHandler):
77 77
78 78 @web.authenticated
79 79 @json_errors
80 80 def post(self, kernel_id, action):
81 81 km = self.kernel_manager
82 82 if action == 'interrupt':
83 83 km.interrupt_kernel(kernel_id)
84 84 self.set_status(204)
85 85 if action == 'restart':
86 86 km.restart_kernel(kernel_id)
87 87 model = km.kernel_model(kernel_id)
88 88 self.set_header('Location', '{0}api/kernels/{1}'.format(self.base_url, kernel_id))
89 89 self.write(jsonapi.dumps(model))
90 90 self.finish()
91 91
92 92
93 93 class ZMQChannelHandler(AuthenticatedZMQStreamHandler):
94 94
95 95 def create_stream(self):
96 96 km = self.kernel_manager
97 97 meth = getattr(km, 'connect_%s' % self.channel)
98 98 self.zmq_stream = meth(self.kernel_id, identity=self.session.bsession)
99 99
100 100 def initialize(self, *args, **kwargs):
101 101 self.zmq_stream = None
102 102
103 103 def on_first_message(self, msg):
104 104 try:
105 105 super(ZMQChannelHandler, self).on_first_message(msg)
106 106 except web.HTTPError:
107 107 self.close()
108 108 return
109 109 try:
110 110 self.create_stream()
111 111 except web.HTTPError:
112 112 # WebSockets don't respond to traditional error codes so we
113 113 # close the connection.
114 114 if not self.stream.closed():
115 115 self.stream.close()
116 116 self.close()
117 117 else:
118 118 self.zmq_stream.on_recv(self._on_zmq_reply)
119 119
120 120 def on_message(self, msg):
121 121 msg = jsonapi.loads(msg)
122 122 self.session.send(self.zmq_stream, msg)
123 123
124 124 def on_close(self):
125 125 # This method can be called twice, once by self.kernel_died and once
126 126 # from the WebSocket close event. If the WebSocket connection is
127 127 # closed before the ZMQ streams are setup, they could be None.
128 128 if self.zmq_stream is not None and not self.zmq_stream.closed():
129 129 self.zmq_stream.on_recv(None)
130 # close the socket directly, don't wait for the stream
131 socket = self.zmq_stream.socket
130 132 self.zmq_stream.close()
133 socket.close()
131 134
132 135
133 136 class IOPubHandler(ZMQChannelHandler):
134 137 channel = 'iopub'
135 138
136 139 def create_stream(self):
137 140 super(IOPubHandler, self).create_stream()
138 141 km = self.kernel_manager
139 142 km.add_restart_callback(self.kernel_id, self.on_kernel_restarted)
140 143 km.add_restart_callback(self.kernel_id, self.on_restart_failed, 'dead')
141 144
142 145 def on_close(self):
143 146 km = self.kernel_manager
144 147 if self.kernel_id in km:
145 148 km.remove_restart_callback(
146 149 self.kernel_id, self.on_kernel_restarted,
147 150 )
148 151 km.remove_restart_callback(
149 152 self.kernel_id, self.on_restart_failed, 'dead',
150 153 )
151 154 super(IOPubHandler, self).on_close()
152 155
153 156 def _send_status_message(self, status):
154 157 msg = self.session.msg("status",
155 158 {'execution_state': status}
156 159 )
157 160 self.write_message(jsonapi.dumps(msg, default=date_default))
158 161
159 162 def on_kernel_restarted(self):
160 163 logging.warn("kernel %s restarted", self.kernel_id)
161 164 self._send_status_message('restarting')
162 165
163 166 def on_restart_failed(self):
164 167 logging.error("kernel %s restart failed!", self.kernel_id)
165 168 self._send_status_message('dead')
166 169
167 170 def on_message(self, msg):
168 171 """IOPub messages make no sense"""
169 172 pass
170 173
171 174
172 175 class ShellHandler(ZMQChannelHandler):
173 176 channel = 'shell'
174 177
175 178
176 179 class StdinHandler(ZMQChannelHandler):
177 180 channel = 'stdin'
178 181
179 182
180 183 #-----------------------------------------------------------------------------
181 184 # URL to handler mappings
182 185 #-----------------------------------------------------------------------------
183 186
184 187
185 188 _kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)"
186 189 _kernel_action_regex = r"(?P<action>restart|interrupt)"
187 190
188 191 default_handlers = [
189 192 (r"/api/kernels", MainKernelHandler),
190 193 (r"/api/kernels/%s" % _kernel_id_regex, KernelHandler),
191 194 (r"/api/kernels/%s/%s" % (_kernel_id_regex, _kernel_action_regex), KernelActionHandler),
192 195 (r"/api/kernels/%s/iopub" % _kernel_id_regex, IOPubHandler),
193 196 (r"/api/kernels/%s/shell" % _kernel_id_regex, ShellHandler),
194 197 (r"/api/kernels/%s/stdin" % _kernel_id_regex, StdinHandler)
195 198 ]
@@ -1,634 +1,637 b''
1 1 """Base classes to manage a Client's interaction with a running kernel
2 2 """
3 3
4 4 #-----------------------------------------------------------------------------
5 5 # Copyright (C) 2013 The IPython Development Team
6 6 #
7 7 # Distributed under the terms of the BSD License. The full license is in
8 8 # the file COPYING, distributed as part of this software.
9 9 #-----------------------------------------------------------------------------
10 10
11 11 #-----------------------------------------------------------------------------
12 12 # Imports
13 13 #-----------------------------------------------------------------------------
14 14
15 15 from __future__ import absolute_import
16 16
17 17 # Standard library imports
18 18 import atexit
19 19 import errno
20 20 from threading import Thread
21 21 import time
22 22
23 23 import zmq
24 24 # import ZMQError in top-level namespace, to avoid ugly attribute-error messages
25 25 # during garbage collection of threads at exit:
26 26 from zmq import ZMQError
27 27 from zmq.eventloop import ioloop, zmqstream
28 28
29 29 # Local imports
30 30 from .channelsabc import (
31 31 ShellChannelABC, IOPubChannelABC,
32 32 HBChannelABC, StdInChannelABC,
33 33 )
34 34 from IPython.utils.py3compat import string_types, iteritems
35 35
36 36 #-----------------------------------------------------------------------------
37 37 # Constants and exceptions
38 38 #-----------------------------------------------------------------------------
39 39
40 40 class InvalidPortNumber(Exception):
41 41 pass
42 42
43 43 #-----------------------------------------------------------------------------
44 44 # Utility functions
45 45 #-----------------------------------------------------------------------------
46 46
47 47 # some utilities to validate message structure, these might get moved elsewhere
48 48 # if they prove to have more generic utility
49 49
50 50 def validate_string_list(lst):
51 51 """Validate that the input is a list of strings.
52 52
53 53 Raises ValueError if not."""
54 54 if not isinstance(lst, list):
55 55 raise ValueError('input %r must be a list' % lst)
56 56 for x in lst:
57 57 if not isinstance(x, string_types):
58 58 raise ValueError('element %r in list must be a string' % x)
59 59
60 60
61 61 def validate_string_dict(dct):
62 62 """Validate that the input is a dict with string keys and values.
63 63
64 64 Raises ValueError if not."""
65 65 for k,v in iteritems(dct):
66 66 if not isinstance(k, string_types):
67 67 raise ValueError('key %r in dict must be a string' % k)
68 68 if not isinstance(v, string_types):
69 69 raise ValueError('value %r in dict must be a string' % v)
70 70
71 71
72 72 #-----------------------------------------------------------------------------
73 73 # ZMQ Socket Channel classes
74 74 #-----------------------------------------------------------------------------
75 75
76 76 class ZMQSocketChannel(Thread):
77 77 """The base class for the channels that use ZMQ sockets."""
78 78 context = None
79 79 session = None
80 80 socket = None
81 81 ioloop = None
82 82 stream = None
83 83 _address = None
84 84 _exiting = False
85 85 proxy_methods = []
86 86
87 87 def __init__(self, context, session, address):
88 88 """Create a channel.
89 89
90 90 Parameters
91 91 ----------
92 92 context : :class:`zmq.Context`
93 93 The ZMQ context to use.
94 94 session : :class:`session.Session`
95 95 The session to use.
96 96 address : zmq url
97 97 Standard (ip, port) tuple that the kernel is listening on.
98 98 """
99 99 super(ZMQSocketChannel, self).__init__()
100 100 self.daemon = True
101 101
102 102 self.context = context
103 103 self.session = session
104 104 if isinstance(address, tuple):
105 105 if address[1] == 0:
106 106 message = 'The port number for a channel cannot be 0.'
107 107 raise InvalidPortNumber(message)
108 108 address = "tcp://%s:%i" % address
109 109 self._address = address
110 110 atexit.register(self._notice_exit)
111 111
112 112 def _notice_exit(self):
113 113 self._exiting = True
114 114
115 115 def _run_loop(self):
116 116 """Run my loop, ignoring EINTR events in the poller"""
117 117 while True:
118 118 try:
119 119 self.ioloop.start()
120 120 except ZMQError as e:
121 121 if e.errno == errno.EINTR:
122 122 continue
123 123 else:
124 124 raise
125 125 except Exception:
126 126 if self._exiting:
127 127 break
128 128 else:
129 129 raise
130 130 else:
131 131 break
132 132
133 133 def stop(self):
134 134 """Stop the channel's event loop and join its thread.
135 135
136 136 This calls :meth:`~threading.Thread.join` and returns when the thread
137 137 terminates. :class:`RuntimeError` will be raised if
138 138 :meth:`~threading.Thread.start` is called again.
139 139 """
140 140 if self.ioloop is not None:
141 141 self.ioloop.stop()
142 142 self.join()
143 143 self.close()
144 144
145 145 def close(self):
146 146 if self.ioloop is not None:
147 147 try:
148 148 self.ioloop.close(all_fds=True)
149 149 except Exception:
150 150 pass
151 151 if self.socket is not None:
152 152 try:
153 153 self.socket.close(linger=0)
154 154 except Exception:
155 155 pass
156 156 self.socket = None
157 157
158 158 @property
159 159 def address(self):
160 160 """Get the channel's address as a zmq url string.
161 161
162 162 These URLS have the form: 'tcp://127.0.0.1:5555'.
163 163 """
164 164 return self._address
165 165
166 166 def _queue_send(self, msg):
167 167 """Queue a message to be sent from the IOLoop's thread.
168 168
169 169 Parameters
170 170 ----------
171 171 msg : message to send
172 172
173 173 This is threadsafe, as it uses IOLoop.add_callback to give the loop's
174 174 thread control of the action.
175 175 """
176 176 def thread_send():
177 177 self.session.send(self.stream, msg)
178 178 self.ioloop.add_callback(thread_send)
179 179
180 180 def _handle_recv(self, msg):
181 181 """Callback for stream.on_recv.
182 182
183 183 Unpacks message, and calls handlers with it.
184 184 """
185 185 ident,smsg = self.session.feed_identities(msg)
186 186 self.call_handlers(self.session.unserialize(smsg))
187 187
188 188
189 189
190 190 class ShellChannel(ZMQSocketChannel):
191 191 """The shell channel for issuing request/replies to the kernel."""
192 192
193 193 command_queue = None
194 194 # flag for whether execute requests should be allowed to call raw_input:
195 195 allow_stdin = True
196 196 proxy_methods = [
197 197 'execute',
198 198 'complete',
199 199 'object_info',
200 200 'history',
201 201 'kernel_info',
202 202 'shutdown',
203 203 ]
204 204
205 205 def __init__(self, context, session, address):
206 206 super(ShellChannel, self).__init__(context, session, address)
207 207 self.ioloop = ioloop.IOLoop()
208 208
209 209 def run(self):
210 210 """The thread's main activity. Call start() instead."""
211 211 self.socket = self.context.socket(zmq.DEALER)
212 self.socket.linger = 1000
212 213 self.socket.setsockopt(zmq.IDENTITY, self.session.bsession)
213 214 self.socket.connect(self.address)
214 215 self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
215 216 self.stream.on_recv(self._handle_recv)
216 217 self._run_loop()
217 218
218 219 def call_handlers(self, msg):
219 220 """This method is called in the ioloop thread when a message arrives.
220 221
221 222 Subclasses should override this method to handle incoming messages.
222 223 It is important to remember that this method is called in the thread
223 224 so that some logic must be done to ensure that the application level
224 225 handlers are called in the application thread.
225 226 """
226 227 raise NotImplementedError('call_handlers must be defined in a subclass.')
227 228
228 229 def execute(self, code, silent=False, store_history=True,
229 230 user_variables=None, user_expressions=None, allow_stdin=None):
230 231 """Execute code in the kernel.
231 232
232 233 Parameters
233 234 ----------
234 235 code : str
235 236 A string of Python code.
236 237
237 238 silent : bool, optional (default False)
238 239 If set, the kernel will execute the code as quietly possible, and
239 240 will force store_history to be False.
240 241
241 242 store_history : bool, optional (default True)
242 243 If set, the kernel will store command history. This is forced
243 244 to be False if silent is True.
244 245
245 246 user_variables : list, optional
246 247 A list of variable names to pull from the user's namespace. They
247 248 will come back as a dict with these names as keys and their
248 249 :func:`repr` as values.
249 250
250 251 user_expressions : dict, optional
251 252 A dict mapping names to expressions to be evaluated in the user's
252 253 dict. The expression values are returned as strings formatted using
253 254 :func:`repr`.
254 255
255 256 allow_stdin : bool, optional (default self.allow_stdin)
256 257 Flag for whether the kernel can send stdin requests to frontends.
257 258
258 259 Some frontends (e.g. the Notebook) do not support stdin requests.
259 260 If raw_input is called from code executed from such a frontend, a
260 261 StdinNotImplementedError will be raised.
261 262
262 263 Returns
263 264 -------
264 265 The msg_id of the message sent.
265 266 """
266 267 if user_variables is None:
267 268 user_variables = []
268 269 if user_expressions is None:
269 270 user_expressions = {}
270 271 if allow_stdin is None:
271 272 allow_stdin = self.allow_stdin
272 273
273 274
274 275 # Don't waste network traffic if inputs are invalid
275 276 if not isinstance(code, string_types):
276 277 raise ValueError('code %r must be a string' % code)
277 278 validate_string_list(user_variables)
278 279 validate_string_dict(user_expressions)
279 280
280 281 # Create class for content/msg creation. Related to, but possibly
281 282 # not in Session.
282 283 content = dict(code=code, silent=silent, store_history=store_history,
283 284 user_variables=user_variables,
284 285 user_expressions=user_expressions,
285 286 allow_stdin=allow_stdin,
286 287 )
287 288 msg = self.session.msg('execute_request', content)
288 289 self._queue_send(msg)
289 290 return msg['header']['msg_id']
290 291
291 292 def complete(self, text, line, cursor_pos, block=None):
292 293 """Tab complete text in the kernel's namespace.
293 294
294 295 Parameters
295 296 ----------
296 297 text : str
297 298 The text to complete.
298 299 line : str
299 300 The full line of text that is the surrounding context for the
300 301 text to complete.
301 302 cursor_pos : int
302 303 The position of the cursor in the line where the completion was
303 304 requested.
304 305 block : str, optional
305 306 The full block of code in which the completion is being requested.
306 307
307 308 Returns
308 309 -------
309 310 The msg_id of the message sent.
310 311 """
311 312 content = dict(text=text, line=line, block=block, cursor_pos=cursor_pos)
312 313 msg = self.session.msg('complete_request', content)
313 314 self._queue_send(msg)
314 315 return msg['header']['msg_id']
315 316
316 317 def object_info(self, oname, detail_level=0):
317 318 """Get metadata information about an object in the kernel's namespace.
318 319
319 320 Parameters
320 321 ----------
321 322 oname : str
322 323 A string specifying the object name.
323 324 detail_level : int, optional
324 325 The level of detail for the introspection (0-2)
325 326
326 327 Returns
327 328 -------
328 329 The msg_id of the message sent.
329 330 """
330 331 content = dict(oname=oname, detail_level=detail_level)
331 332 msg = self.session.msg('object_info_request', content)
332 333 self._queue_send(msg)
333 334 return msg['header']['msg_id']
334 335
335 336 def history(self, raw=True, output=False, hist_access_type='range', **kwargs):
336 337 """Get entries from the kernel's history list.
337 338
338 339 Parameters
339 340 ----------
340 341 raw : bool
341 342 If True, return the raw input.
342 343 output : bool
343 344 If True, then return the output as well.
344 345 hist_access_type : str
345 346 'range' (fill in session, start and stop params), 'tail' (fill in n)
346 347 or 'search' (fill in pattern param).
347 348
348 349 session : int
349 350 For a range request, the session from which to get lines. Session
350 351 numbers are positive integers; negative ones count back from the
351 352 current session.
352 353 start : int
353 354 The first line number of a history range.
354 355 stop : int
355 356 The final (excluded) line number of a history range.
356 357
357 358 n : int
358 359 The number of lines of history to get for a tail request.
359 360
360 361 pattern : str
361 362 The glob-syntax pattern for a search request.
362 363
363 364 Returns
364 365 -------
365 366 The msg_id of the message sent.
366 367 """
367 368 content = dict(raw=raw, output=output, hist_access_type=hist_access_type,
368 369 **kwargs)
369 370 msg = self.session.msg('history_request', content)
370 371 self._queue_send(msg)
371 372 return msg['header']['msg_id']
372 373
373 374 def kernel_info(self):
374 375 """Request kernel info."""
375 376 msg = self.session.msg('kernel_info_request')
376 377 self._queue_send(msg)
377 378 return msg['header']['msg_id']
378 379
379 380 def shutdown(self, restart=False):
380 381 """Request an immediate kernel shutdown.
381 382
382 383 Upon receipt of the (empty) reply, client code can safely assume that
383 384 the kernel has shut down and it's safe to forcefully terminate it if
384 385 it's still alive.
385 386
386 387 The kernel will send the reply via a function registered with Python's
387 388 atexit module, ensuring it's truly done as the kernel is done with all
388 389 normal operation.
389 390 """
390 391 # Send quit message to kernel. Once we implement kernel-side setattr,
391 392 # this should probably be done that way, but for now this will do.
392 393 msg = self.session.msg('shutdown_request', {'restart':restart})
393 394 self._queue_send(msg)
394 395 return msg['header']['msg_id']
395 396
396 397
397 398
398 399 class IOPubChannel(ZMQSocketChannel):
399 400 """The iopub channel which listens for messages that the kernel publishes.
400 401
401 402 This channel is where all output is published to frontends.
402 403 """
403 404
404 405 def __init__(self, context, session, address):
405 406 super(IOPubChannel, self).__init__(context, session, address)
406 407 self.ioloop = ioloop.IOLoop()
407 408
408 409 def run(self):
409 410 """The thread's main activity. Call start() instead."""
410 411 self.socket = self.context.socket(zmq.SUB)
412 self.socket.linger = 1000
411 413 self.socket.setsockopt(zmq.SUBSCRIBE,b'')
412 414 self.socket.setsockopt(zmq.IDENTITY, self.session.bsession)
413 415 self.socket.connect(self.address)
414 416 self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
415 417 self.stream.on_recv(self._handle_recv)
416 418 self._run_loop()
417 419
418 420 def call_handlers(self, msg):
419 421 """This method is called in the ioloop thread when a message arrives.
420 422
421 423 Subclasses should override this method to handle incoming messages.
422 424 It is important to remember that this method is called in the thread
423 425 so that some logic must be done to ensure that the application level
424 426 handlers are called in the application thread.
425 427 """
426 428 raise NotImplementedError('call_handlers must be defined in a subclass.')
427 429
428 430 def flush(self, timeout=1.0):
429 431 """Immediately processes all pending messages on the iopub channel.
430 432
431 433 Callers should use this method to ensure that :meth:`call_handlers`
432 434 has been called for all messages that have been received on the
433 435 0MQ SUB socket of this channel.
434 436
435 437 This method is thread safe.
436 438
437 439 Parameters
438 440 ----------
439 441 timeout : float, optional
440 442 The maximum amount of time to spend flushing, in seconds. The
441 443 default is one second.
442 444 """
443 445 # We do the IOLoop callback process twice to ensure that the IOLoop
444 446 # gets to perform at least one full poll.
445 447 stop_time = time.time() + timeout
446 448 for i in range(2):
447 449 self._flushed = False
448 450 self.ioloop.add_callback(self._flush)
449 451 while not self._flushed and time.time() < stop_time:
450 452 time.sleep(0.01)
451 453
452 454 def _flush(self):
453 455 """Callback for :method:`self.flush`."""
454 456 self.stream.flush()
455 457 self._flushed = True
456 458
457 459
458 460 class StdInChannel(ZMQSocketChannel):
459 461 """The stdin channel to handle raw_input requests that the kernel makes."""
460 462
461 463 msg_queue = None
462 464 proxy_methods = ['input']
463 465
464 466 def __init__(self, context, session, address):
465 467 super(StdInChannel, self).__init__(context, session, address)
466 468 self.ioloop = ioloop.IOLoop()
467 469
468 470 def run(self):
469 471 """The thread's main activity. Call start() instead."""
470 472 self.socket = self.context.socket(zmq.DEALER)
473 self.socket.linger = 1000
471 474 self.socket.setsockopt(zmq.IDENTITY, self.session.bsession)
472 475 self.socket.connect(self.address)
473 476 self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
474 477 self.stream.on_recv(self._handle_recv)
475 478 self._run_loop()
476 479
477 480 def call_handlers(self, msg):
478 481 """This method is called in the ioloop thread when a message arrives.
479 482
480 483 Subclasses should override this method to handle incoming messages.
481 484 It is important to remember that this method is called in the thread
482 485 so that some logic must be done to ensure that the application level
483 486 handlers are called in the application thread.
484 487 """
485 488 raise NotImplementedError('call_handlers must be defined in a subclass.')
486 489
487 490 def input(self, string):
488 491 """Send a string of raw input to the kernel."""
489 492 content = dict(value=string)
490 493 msg = self.session.msg('input_reply', content)
491 494 self._queue_send(msg)
492 495
493 496
494 497 class HBChannel(ZMQSocketChannel):
495 498 """The heartbeat channel which monitors the kernel heartbeat.
496 499
497 500 Note that the heartbeat channel is paused by default. As long as you start
498 501 this channel, the kernel manager will ensure that it is paused and un-paused
499 502 as appropriate.
500 503 """
501 504
502 505 time_to_dead = 3.0
503 506 socket = None
504 507 poller = None
505 508 _running = None
506 509 _pause = None
507 510 _beating = None
508 511
509 512 def __init__(self, context, session, address):
510 513 super(HBChannel, self).__init__(context, session, address)
511 514 self._running = False
512 515 self._pause =True
513 516 self.poller = zmq.Poller()
514 517
515 518 def _create_socket(self):
516 519 if self.socket is not None:
517 520 # close previous socket, before opening a new one
518 521 self.poller.unregister(self.socket)
519 522 self.socket.close()
520 523 self.socket = self.context.socket(zmq.REQ)
521 self.socket.setsockopt(zmq.LINGER, 0)
524 self.socket.linger = 1000
522 525 self.socket.connect(self.address)
523 526
524 527 self.poller.register(self.socket, zmq.POLLIN)
525 528
526 529 def _poll(self, start_time):
527 530 """poll for heartbeat replies until we reach self.time_to_dead.
528 531
529 532 Ignores interrupts, and returns the result of poll(), which
530 533 will be an empty list if no messages arrived before the timeout,
531 534 or the event tuple if there is a message to receive.
532 535 """
533 536
534 537 until_dead = self.time_to_dead - (time.time() - start_time)
535 538 # ensure poll at least once
536 539 until_dead = max(until_dead, 1e-3)
537 540 events = []
538 541 while True:
539 542 try:
540 543 events = self.poller.poll(1000 * until_dead)
541 544 except ZMQError as e:
542 545 if e.errno == errno.EINTR:
543 546 # ignore interrupts during heartbeat
544 547 # this may never actually happen
545 548 until_dead = self.time_to_dead - (time.time() - start_time)
546 549 until_dead = max(until_dead, 1e-3)
547 550 pass
548 551 else:
549 552 raise
550 553 except Exception:
551 554 if self._exiting:
552 555 break
553 556 else:
554 557 raise
555 558 else:
556 559 break
557 560 return events
558 561
559 562 def run(self):
560 563 """The thread's main activity. Call start() instead."""
561 564 self._create_socket()
562 565 self._running = True
563 566 self._beating = True
564 567
565 568 while self._running:
566 569 if self._pause:
567 570 # just sleep, and skip the rest of the loop
568 571 time.sleep(self.time_to_dead)
569 572 continue
570 573
571 574 since_last_heartbeat = 0.0
572 575 # io.rprint('Ping from HB channel') # dbg
573 576 # no need to catch EFSM here, because the previous event was
574 577 # either a recv or connect, which cannot be followed by EFSM
575 578 self.socket.send(b'ping')
576 579 request_time = time.time()
577 580 ready = self._poll(request_time)
578 581 if ready:
579 582 self._beating = True
580 583 # the poll above guarantees we have something to recv
581 584 self.socket.recv()
582 585 # sleep the remainder of the cycle
583 586 remainder = self.time_to_dead - (time.time() - request_time)
584 587 if remainder > 0:
585 588 time.sleep(remainder)
586 589 continue
587 590 else:
588 591 # nothing was received within the time limit, signal heart failure
589 592 self._beating = False
590 593 since_last_heartbeat = time.time() - request_time
591 594 self.call_handlers(since_last_heartbeat)
592 595 # and close/reopen the socket, because the REQ/REP cycle has been broken
593 596 self._create_socket()
594 597 continue
595 598
596 599 def pause(self):
597 600 """Pause the heartbeat."""
598 601 self._pause = True
599 602
600 603 def unpause(self):
601 604 """Unpause the heartbeat."""
602 605 self._pause = False
603 606
604 607 def is_beating(self):
605 608 """Is the heartbeat running and responsive (and not paused)."""
606 609 if self.is_alive() and not self._pause and self._beating:
607 610 return True
608 611 else:
609 612 return False
610 613
611 614 def stop(self):
612 615 """Stop the channel's event loop and join its thread."""
613 616 self._running = False
614 617 super(HBChannel, self).stop()
615 618
616 619 def call_handlers(self, since_last_heartbeat):
617 620 """This method is called in the ioloop thread when a message arrives.
618 621
619 622 Subclasses should override this method to handle incoming messages.
620 623 It is important to remember that this method is called in the thread
621 624 so that some logic must be done to ensure that the application level
622 625 handlers are called in the application thread.
623 626 """
624 627 raise NotImplementedError('call_handlers must be defined in a subclass.')
625 628
626 629
627 630 #-----------------------------------------------------------------------------
628 631 # ABC Registration
629 632 #-----------------------------------------------------------------------------
630 633
631 634 ShellChannelABC.register(ShellChannel)
632 635 IOPubChannelABC.register(IOPubChannel)
633 636 HBChannelABC.register(HBChannel)
634 637 StdInChannelABC.register(StdInChannel)
@@ -1,569 +1,571 b''
1 1 """Utilities for connecting to kernels
2 2
3 3 Notable contents:
4 4 - ConnectionFileMixin class
5 5 encapsulates the logic related to writing and reading connections files.
6 6 """
7 7 # Copyright (c) IPython Development Team.
8 8 # Distributed under the terms of the Modified BSD License.
9 9
10 10 #-----------------------------------------------------------------------------
11 11 # Imports
12 12 #-----------------------------------------------------------------------------
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import glob
17 17 import json
18 18 import os
19 19 import socket
20 20 import sys
21 21 from getpass import getpass
22 22 from subprocess import Popen, PIPE
23 23 import tempfile
24 24
25 25 import zmq
26 26
27 27 # external imports
28 28 from IPython.external.ssh import tunnel
29 29
30 30 # IPython imports
31 31 from IPython.config import Configurable
32 32 from IPython.core.profiledir import ProfileDir
33 33 from IPython.utils.localinterfaces import localhost
34 34 from IPython.utils.path import filefind, get_ipython_dir
35 35 from IPython.utils.py3compat import (str_to_bytes, bytes_to_str, cast_bytes_py2,
36 36 string_types)
37 37 from IPython.utils.traitlets import (
38 38 Bool, Integer, Unicode, CaselessStrEnum,
39 39 )
40 40
41 41
42 42 #-----------------------------------------------------------------------------
43 43 # Working with Connection Files
44 44 #-----------------------------------------------------------------------------
45 45
46 46 def write_connection_file(fname=None, shell_port=0, iopub_port=0, stdin_port=0, hb_port=0,
47 47 control_port=0, ip='', key=b'', transport='tcp',
48 48 signature_scheme='hmac-sha256',
49 49 ):
50 50 """Generates a JSON config file, including the selection of random ports.
51 51
52 52 Parameters
53 53 ----------
54 54
55 55 fname : unicode
56 56 The path to the file to write
57 57
58 58 shell_port : int, optional
59 59 The port to use for ROUTER (shell) channel.
60 60
61 61 iopub_port : int, optional
62 62 The port to use for the SUB channel.
63 63
64 64 stdin_port : int, optional
65 65 The port to use for the ROUTER (raw input) channel.
66 66
67 67 control_port : int, optional
68 68 The port to use for the ROUTER (control) channel.
69 69
70 70 hb_port : int, optional
71 71 The port to use for the heartbeat REP channel.
72 72
73 73 ip : str, optional
74 74 The ip address the kernel will bind to.
75 75
76 76 key : str, optional
77 77 The Session key used for message authentication.
78 78
79 79 signature_scheme : str, optional
80 80 The scheme used for message authentication.
81 81 This has the form 'digest-hash', where 'digest'
82 82 is the scheme used for digests, and 'hash' is the name of the hash function
83 83 used by the digest scheme.
84 84 Currently, 'hmac' is the only supported digest scheme,
85 85 and 'sha256' is the default hash function.
86 86
87 87 """
88 88 if not ip:
89 89 ip = localhost()
90 90 # default to temporary connector file
91 91 if not fname:
92 92 fd, fname = tempfile.mkstemp('.json')
93 93 os.close(fd)
94 94
95 95 # Find open ports as necessary.
96 96
97 97 ports = []
98 98 ports_needed = int(shell_port <= 0) + \
99 99 int(iopub_port <= 0) + \
100 100 int(stdin_port <= 0) + \
101 101 int(control_port <= 0) + \
102 102 int(hb_port <= 0)
103 103 if transport == 'tcp':
104 104 for i in range(ports_needed):
105 105 sock = socket.socket()
106 106 # struct.pack('ii', (0,0)) is 8 null bytes
107 107 sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, b'\0' * 8)
108 108 sock.bind(('', 0))
109 109 ports.append(sock)
110 110 for i, sock in enumerate(ports):
111 111 port = sock.getsockname()[1]
112 112 sock.close()
113 113 ports[i] = port
114 114 else:
115 115 N = 1
116 116 for i in range(ports_needed):
117 117 while os.path.exists("%s-%s" % (ip, str(N))):
118 118 N += 1
119 119 ports.append(N)
120 120 N += 1
121 121 if shell_port <= 0:
122 122 shell_port = ports.pop(0)
123 123 if iopub_port <= 0:
124 124 iopub_port = ports.pop(0)
125 125 if stdin_port <= 0:
126 126 stdin_port = ports.pop(0)
127 127 if control_port <= 0:
128 128 control_port = ports.pop(0)
129 129 if hb_port <= 0:
130 130 hb_port = ports.pop(0)
131 131
132 132 cfg = dict( shell_port=shell_port,
133 133 iopub_port=iopub_port,
134 134 stdin_port=stdin_port,
135 135 control_port=control_port,
136 136 hb_port=hb_port,
137 137 )
138 138 cfg['ip'] = ip
139 139 cfg['key'] = bytes_to_str(key)
140 140 cfg['transport'] = transport
141 141 cfg['signature_scheme'] = signature_scheme
142 142
143 143 with open(fname, 'w') as f:
144 144 f.write(json.dumps(cfg, indent=2))
145 145
146 146 return fname, cfg
147 147
148 148
149 149 def get_connection_file(app=None):
150 150 """Return the path to the connection file of an app
151 151
152 152 Parameters
153 153 ----------
154 154 app : IPKernelApp instance [optional]
155 155 If unspecified, the currently running app will be used
156 156 """
157 157 if app is None:
158 158 from IPython.kernel.zmq.kernelapp import IPKernelApp
159 159 if not IPKernelApp.initialized():
160 160 raise RuntimeError("app not specified, and not in a running Kernel")
161 161
162 162 app = IPKernelApp.instance()
163 163 return filefind(app.connection_file, ['.', app.profile_dir.security_dir])
164 164
165 165
166 166 def find_connection_file(filename, profile=None):
167 167 """find a connection file, and return its absolute path.
168 168
169 169 The current working directory and the profile's security
170 170 directory will be searched for the file if it is not given by
171 171 absolute path.
172 172
173 173 If profile is unspecified, then the current running application's
174 174 profile will be used, or 'default', if not run from IPython.
175 175
176 176 If the argument does not match an existing file, it will be interpreted as a
177 177 fileglob, and the matching file in the profile's security dir with
178 178 the latest access time will be used.
179 179
180 180 Parameters
181 181 ----------
182 182 filename : str
183 183 The connection file or fileglob to search for.
184 184 profile : str [optional]
185 185 The name of the profile to use when searching for the connection file,
186 186 if different from the current IPython session or 'default'.
187 187
188 188 Returns
189 189 -------
190 190 str : The absolute path of the connection file.
191 191 """
192 192 from IPython.core.application import BaseIPythonApplication as IPApp
193 193 try:
194 194 # quick check for absolute path, before going through logic
195 195 return filefind(filename)
196 196 except IOError:
197 197 pass
198 198
199 199 if profile is None:
200 200 # profile unspecified, check if running from an IPython app
201 201 if IPApp.initialized():
202 202 app = IPApp.instance()
203 203 profile_dir = app.profile_dir
204 204 else:
205 205 # not running in IPython, use default profile
206 206 profile_dir = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), 'default')
207 207 else:
208 208 # find profiledir by profile name:
209 209 profile_dir = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile)
210 210 security_dir = profile_dir.security_dir
211 211
212 212 try:
213 213 # first, try explicit name
214 214 return filefind(filename, ['.', security_dir])
215 215 except IOError:
216 216 pass
217 217
218 218 # not found by full name
219 219
220 220 if '*' in filename:
221 221 # given as a glob already
222 222 pat = filename
223 223 else:
224 224 # accept any substring match
225 225 pat = '*%s*' % filename
226 226 matches = glob.glob( os.path.join(security_dir, pat) )
227 227 if not matches:
228 228 raise IOError("Could not find %r in %r" % (filename, security_dir))
229 229 elif len(matches) == 1:
230 230 return matches[0]
231 231 else:
232 232 # get most recent match, by access time:
233 233 return sorted(matches, key=lambda f: os.stat(f).st_atime)[-1]
234 234
235 235
236 236 def get_connection_info(connection_file=None, unpack=False, profile=None):
237 237 """Return the connection information for the current Kernel.
238 238
239 239 Parameters
240 240 ----------
241 241 connection_file : str [optional]
242 242 The connection file to be used. Can be given by absolute path, or
243 243 IPython will search in the security directory of a given profile.
244 244 If run from IPython,
245 245
246 246 If unspecified, the connection file for the currently running
247 247 IPython Kernel will be used, which is only allowed from inside a kernel.
248 248 unpack : bool [default: False]
249 249 if True, return the unpacked dict, otherwise just the string contents
250 250 of the file.
251 251 profile : str [optional]
252 252 The name of the profile to use when searching for the connection file,
253 253 if different from the current IPython session or 'default'.
254 254
255 255
256 256 Returns
257 257 -------
258 258 The connection dictionary of the current kernel, as string or dict,
259 259 depending on `unpack`.
260 260 """
261 261 if connection_file is None:
262 262 # get connection file from current kernel
263 263 cf = get_connection_file()
264 264 else:
265 265 # connection file specified, allow shortnames:
266 266 cf = find_connection_file(connection_file, profile=profile)
267 267
268 268 with open(cf) as f:
269 269 info = f.read()
270 270
271 271 if unpack:
272 272 info = json.loads(info)
273 273 # ensure key is bytes:
274 274 info['key'] = str_to_bytes(info.get('key', ''))
275 275 return info
276 276
277 277
278 278 def connect_qtconsole(connection_file=None, argv=None, profile=None):
279 279 """Connect a qtconsole to the current kernel.
280 280
281 281 This is useful for connecting a second qtconsole to a kernel, or to a
282 282 local notebook.
283 283
284 284 Parameters
285 285 ----------
286 286 connection_file : str [optional]
287 287 The connection file to be used. Can be given by absolute path, or
288 288 IPython will search in the security directory of a given profile.
289 289 If run from IPython,
290 290
291 291 If unspecified, the connection file for the currently running
292 292 IPython Kernel will be used, which is only allowed from inside a kernel.
293 293 argv : list [optional]
294 294 Any extra args to be passed to the console.
295 295 profile : str [optional]
296 296 The name of the profile to use when searching for the connection file,
297 297 if different from the current IPython session or 'default'.
298 298
299 299
300 300 Returns
301 301 -------
302 302 subprocess.Popen instance running the qtconsole frontend
303 303 """
304 304 argv = [] if argv is None else argv
305 305
306 306 if connection_file is None:
307 307 # get connection file from current kernel
308 308 cf = get_connection_file()
309 309 else:
310 310 cf = find_connection_file(connection_file, profile=profile)
311 311
312 312 cmd = ';'.join([
313 313 "from IPython.qt.console import qtconsoleapp",
314 314 "qtconsoleapp.main()"
315 315 ])
316 316
317 317 return Popen([sys.executable, '-c', cmd, '--existing', cf] + argv,
318 318 stdout=PIPE, stderr=PIPE, close_fds=(sys.platform != 'win32'),
319 319 )
320 320
321 321
322 322 def tunnel_to_kernel(connection_info, sshserver, sshkey=None):
323 323 """tunnel connections to a kernel via ssh
324 324
325 325 This will open four SSH tunnels from localhost on this machine to the
326 326 ports associated with the kernel. They can be either direct
327 327 localhost-localhost tunnels, or if an intermediate server is necessary,
328 328 the kernel must be listening on a public IP.
329 329
330 330 Parameters
331 331 ----------
332 332 connection_info : dict or str (path)
333 333 Either a connection dict, or the path to a JSON connection file
334 334 sshserver : str
335 335 The ssh sever to use to tunnel to the kernel. Can be a full
336 336 `user@server:port` string. ssh config aliases are respected.
337 337 sshkey : str [optional]
338 338 Path to file containing ssh key to use for authentication.
339 339 Only necessary if your ssh config does not already associate
340 340 a keyfile with the host.
341 341
342 342 Returns
343 343 -------
344 344
345 345 (shell, iopub, stdin, hb) : ints
346 346 The four ports on localhost that have been forwarded to the kernel.
347 347 """
348 348 if isinstance(connection_info, string_types):
349 349 # it's a path, unpack it
350 350 with open(connection_info) as f:
351 351 connection_info = json.loads(f.read())
352 352
353 353 cf = connection_info
354 354
355 355 lports = tunnel.select_random_ports(4)
356 356 rports = cf['shell_port'], cf['iopub_port'], cf['stdin_port'], cf['hb_port']
357 357
358 358 remote_ip = cf['ip']
359 359
360 360 if tunnel.try_passwordless_ssh(sshserver, sshkey):
361 361 password=False
362 362 else:
363 363 password = getpass("SSH Password for %s: " % cast_bytes_py2(sshserver))
364 364
365 365 for lp,rp in zip(lports, rports):
366 366 tunnel.ssh_tunnel(lp, rp, sshserver, remote_ip, sshkey, password)
367 367
368 368 return tuple(lports)
369 369
370 370
371 371 #-----------------------------------------------------------------------------
372 372 # Mixin for classes that work with connection files
373 373 #-----------------------------------------------------------------------------
374 374
375 375 channel_socket_types = {
376 376 'hb' : zmq.REQ,
377 377 'shell' : zmq.DEALER,
378 378 'iopub' : zmq.SUB,
379 379 'stdin' : zmq.DEALER,
380 380 'control': zmq.DEALER,
381 381 }
382 382
383 383 port_names = [ "%s_port" % channel for channel in ('shell', 'stdin', 'iopub', 'hb', 'control')]
384 384
385 385 class ConnectionFileMixin(Configurable):
386 386 """Mixin for configurable classes that work with connection files"""
387 387
388 388 # The addresses for the communication channels
389 389 connection_file = Unicode('', config=True,
390 390 help="""JSON file in which to store connection info [default: kernel-<pid>.json]
391 391
392 392 This file will contain the IP, ports, and authentication key needed to connect
393 393 clients to this kernel. By default, this file will be created in the security dir
394 394 of the current profile, but can be specified by absolute path.
395 395 """)
396 396 _connection_file_written = Bool(False)
397 397
398 398 transport = CaselessStrEnum(['tcp', 'ipc'], default_value='tcp', config=True)
399 399
400 400 ip = Unicode(config=True,
401 401 help="""Set the kernel\'s IP address [default localhost].
402 402 If the IP address is something other than localhost, then
403 403 Consoles on other machines will be able to connect
404 404 to the Kernel, so be careful!"""
405 405 )
406 406
407 407 def _ip_default(self):
408 408 if self.transport == 'ipc':
409 409 if self.connection_file:
410 410 return os.path.splitext(self.connection_file)[0] + '-ipc'
411 411 else:
412 412 return 'kernel-ipc'
413 413 else:
414 414 return localhost()
415 415
416 416 def _ip_changed(self, name, old, new):
417 417 if new == '*':
418 418 self.ip = '0.0.0.0'
419 419
420 420 # protected traits
421 421
422 422 hb_port = Integer(0, config=True,
423 423 help="set the heartbeat port [default: random]")
424 424 shell_port = Integer(0, config=True,
425 425 help="set the shell (ROUTER) port [default: random]")
426 426 iopub_port = Integer(0, config=True,
427 427 help="set the iopub (PUB) port [default: random]")
428 428 stdin_port = Integer(0, config=True,
429 429 help="set the stdin (ROUTER) port [default: random]")
430 430 control_port = Integer(0, config=True,
431 431 help="set the control (ROUTER) port [default: random]")
432 432
433 433 @property
434 434 def ports(self):
435 435 return [ getattr(self, name) for name in port_names ]
436 436
437 437 #--------------------------------------------------------------------------
438 438 # Connection and ipc file management
439 439 #--------------------------------------------------------------------------
440 440
441 441 def get_connection_info(self):
442 442 """return the connection info as a dict"""
443 443 return dict(
444 444 transport=self.transport,
445 445 ip=self.ip,
446 446 shell_port=self.shell_port,
447 447 iopub_port=self.iopub_port,
448 448 stdin_port=self.stdin_port,
449 449 hb_port=self.hb_port,
450 450 control_port=self.control_port,
451 451 signature_scheme=self.session.signature_scheme,
452 452 key=self.session.key,
453 453 )
454 454
455 455 def cleanup_connection_file(self):
456 456 """Cleanup connection file *if we wrote it*
457 457
458 458 Will not raise if the connection file was already removed somehow.
459 459 """
460 460 if self._connection_file_written:
461 461 # cleanup connection files on full shutdown of kernel we started
462 462 self._connection_file_written = False
463 463 try:
464 464 os.remove(self.connection_file)
465 465 except (IOError, OSError, AttributeError):
466 466 pass
467 467
468 468 def cleanup_ipc_files(self):
469 469 """Cleanup ipc files if we wrote them."""
470 470 if self.transport != 'ipc':
471 471 return
472 472 for port in self.ports:
473 473 ipcfile = "%s-%i" % (self.ip, port)
474 474 try:
475 475 os.remove(ipcfile)
476 476 except (IOError, OSError):
477 477 pass
478 478
479 479 def write_connection_file(self):
480 480 """Write connection info to JSON dict in self.connection_file."""
481 481 if self._connection_file_written and os.path.exists(self.connection_file):
482 482 return
483 483
484 484 self.connection_file, cfg = write_connection_file(self.connection_file,
485 485 transport=self.transport, ip=self.ip, key=self.session.key,
486 486 stdin_port=self.stdin_port, iopub_port=self.iopub_port,
487 487 shell_port=self.shell_port, hb_port=self.hb_port,
488 488 control_port=self.control_port,
489 489 signature_scheme=self.session.signature_scheme,
490 490 )
491 491 # write_connection_file also sets default ports:
492 492 for name in port_names:
493 493 setattr(self, name, cfg[name])
494 494
495 495 self._connection_file_written = True
496 496
497 497 def load_connection_file(self):
498 498 """Load connection info from JSON dict in self.connection_file."""
499 499 self.log.debug(u"Loading connection file %s", self.connection_file)
500 500 with open(self.connection_file) as f:
501 501 cfg = json.load(f)
502 502 self.transport = cfg.get('transport', self.transport)
503 503 self.ip = cfg.get('ip', self._ip_default())
504 504
505 505 for name in port_names:
506 506 if getattr(self, name) == 0 and name in cfg:
507 507 # not overridden by config or cl_args
508 508 setattr(self, name, cfg[name])
509 509 if 'key' in cfg:
510 510 self.config.Session.key = str_to_bytes(cfg['key'])
511 511 if 'signature_scheme' in cfg:
512 512 self.config.Session.signature_scheme = cfg['signature_scheme']
513 513 #--------------------------------------------------------------------------
514 514 # Creating connected sockets
515 515 #--------------------------------------------------------------------------
516 516
517 517 def _make_url(self, channel):
518 518 """Make a ZeroMQ URL for a given channel."""
519 519 transport = self.transport
520 520 ip = self.ip
521 521 port = getattr(self, '%s_port' % channel)
522 522
523 523 if transport == 'tcp':
524 524 return "tcp://%s:%i" % (ip, port)
525 525 else:
526 526 return "%s://%s-%s" % (transport, ip, port)
527 527
528 528 def _create_connected_socket(self, channel, identity=None):
529 529 """Create a zmq Socket and connect it to the kernel."""
530 530 url = self._make_url(channel)
531 531 socket_type = channel_socket_types[channel]
532 532 self.log.debug("Connecting to: %s" % url)
533 533 sock = self.context.socket(socket_type)
534 # set linger to 1s to prevent hangs at exit
535 sock.linger = 1000
534 536 if identity:
535 537 sock.identity = identity
536 538 sock.connect(url)
537 539 return sock
538 540
539 541 def connect_iopub(self, identity=None):
540 542 """return zmq Socket connected to the IOPub channel"""
541 543 sock = self._create_connected_socket('iopub', identity=identity)
542 544 sock.setsockopt(zmq.SUBSCRIBE, b'')
543 545 return sock
544 546
545 547 def connect_shell(self, identity=None):
546 548 """return zmq Socket connected to the Shell channel"""
547 549 return self._create_connected_socket('shell', identity=identity)
548 550
549 551 def connect_stdin(self, identity=None):
550 552 """return zmq Socket connected to the StdIn channel"""
551 553 return self._create_connected_socket('stdin', identity=identity)
552 554
553 555 def connect_hb(self, identity=None):
554 556 """return zmq Socket connected to the Heartbeat channel"""
555 557 return self._create_connected_socket('hb', identity=identity)
556 558
557 559 def connect_control(self, identity=None):
558 560 """return zmq Socket connected to the Heartbeat channel"""
559 561 return self._create_connected_socket('control', identity=identity)
560 562
561 563
562 564 __all__ = [
563 565 'write_connection_file',
564 566 'get_connection_file',
565 567 'find_connection_file',
566 568 'get_connection_info',
567 569 'connect_qtconsole',
568 570 'tunnel_to_kernel',
569 571 ]
@@ -1,67 +1,68 b''
1 1 """The client and server for a basic ping-pong style heartbeat.
2 2 """
3 3
4 4 #-----------------------------------------------------------------------------
5 5 # Copyright (C) 2008-2011 The IPython Development Team
6 6 #
7 7 # Distributed under the terms of the BSD License. The full license is in
8 8 # the file COPYING, distributed as part of this software.
9 9 #-----------------------------------------------------------------------------
10 10
11 11 #-----------------------------------------------------------------------------
12 12 # Imports
13 13 #-----------------------------------------------------------------------------
14 14
15 15 import errno
16 16 import os
17 17 import socket
18 18 from threading import Thread
19 19
20 20 import zmq
21 21
22 22 from IPython.utils.localinterfaces import localhost
23 23
24 24 #-----------------------------------------------------------------------------
25 25 # Code
26 26 #-----------------------------------------------------------------------------
27 27
28 28
29 29 class Heartbeat(Thread):
30 30 "A simple ping-pong style heartbeat that runs in a thread."
31 31
32 32 def __init__(self, context, addr=None):
33 33 if addr is None:
34 34 addr = ('tcp', localhost(), 0)
35 35 Thread.__init__(self)
36 36 self.context = context
37 37 self.transport, self.ip, self.port = addr
38 38 if self.port == 0:
39 39 if addr[0] == 'tcp':
40 40 s = socket.socket()
41 41 # '*' means all interfaces to 0MQ, which is '' to socket.socket
42 42 s.bind(('' if self.ip == '*' else self.ip, 0))
43 43 self.port = s.getsockname()[1]
44 44 s.close()
45 45 elif addr[0] == 'ipc':
46 46 self.port = 1
47 47 while os.path.exists("%s-%s" % (self.ip, self.port)):
48 48 self.port = self.port + 1
49 49 else:
50 50 raise ValueError("Unrecognized zmq transport: %s" % addr[0])
51 51 self.addr = (self.ip, self.port)
52 52 self.daemon = True
53 53
54 54 def run(self):
55 55 self.socket = self.context.socket(zmq.REP)
56 self.socket.linger = 1000
56 57 c = ':' if self.transport == 'tcp' else '-'
57 58 self.socket.bind('%s://%s' % (self.transport, self.ip) + c + str(self.port))
58 59 while True:
59 60 try:
60 61 zmq.device(zmq.FORWARDER, self.socket, self.socket)
61 62 except zmq.ZMQError as e:
62 63 if e.errno == errno.EINTR:
63 64 continue
64 65 else:
65 66 raise
66 67 else:
67 68 break
@@ -1,411 +1,415 b''
1 1 """An Application for launching a kernel
2 2 """
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 6 #-----------------------------------------------------------------------------
7 7 # Imports
8 8 #-----------------------------------------------------------------------------
9 9
10 10 from __future__ import print_function
11 11
12 12 # Standard library imports
13 13 import atexit
14 14 import os
15 15 import sys
16 16 import signal
17 17
18 18 # System library imports
19 19 import zmq
20 20 from zmq.eventloop import ioloop
21 21 from zmq.eventloop.zmqstream import ZMQStream
22 22
23 23 # IPython imports
24 24 from IPython.core.ultratb import FormattedTB
25 25 from IPython.core.application import (
26 26 BaseIPythonApplication, base_flags, base_aliases, catch_config_error
27 27 )
28 28 from IPython.core.profiledir import ProfileDir
29 29 from IPython.core.shellapp import (
30 30 InteractiveShellApp, shell_flags, shell_aliases
31 31 )
32 32 from IPython.utils import io
33 33 from IPython.utils.path import filefind
34 34 from IPython.utils.traitlets import (
35 35 Any, Instance, Dict, Unicode, Integer, Bool, DottedObjectName,
36 36 )
37 37 from IPython.utils.importstring import import_item
38 38 from IPython.kernel import write_connection_file
39 39 from IPython.kernel.connect import ConnectionFileMixin
40 40
41 41 # local imports
42 42 from .heartbeat import Heartbeat
43 43 from .ipkernel import Kernel
44 44 from .parentpoller import ParentPollerUnix, ParentPollerWindows
45 45 from .session import (
46 46 Session, session_flags, session_aliases, default_secure,
47 47 )
48 48 from .zmqshell import ZMQInteractiveShell
49 49
50 50 #-----------------------------------------------------------------------------
51 51 # Flags and Aliases
52 52 #-----------------------------------------------------------------------------
53 53
54 54 kernel_aliases = dict(base_aliases)
55 55 kernel_aliases.update({
56 56 'ip' : 'IPKernelApp.ip',
57 57 'hb' : 'IPKernelApp.hb_port',
58 58 'shell' : 'IPKernelApp.shell_port',
59 59 'iopub' : 'IPKernelApp.iopub_port',
60 60 'stdin' : 'IPKernelApp.stdin_port',
61 61 'control' : 'IPKernelApp.control_port',
62 62 'f' : 'IPKernelApp.connection_file',
63 63 'parent': 'IPKernelApp.parent_handle',
64 64 'transport': 'IPKernelApp.transport',
65 65 })
66 66 if sys.platform.startswith('win'):
67 67 kernel_aliases['interrupt'] = 'IPKernelApp.interrupt'
68 68
69 69 kernel_flags = dict(base_flags)
70 70 kernel_flags.update({
71 71 'no-stdout' : (
72 72 {'IPKernelApp' : {'no_stdout' : True}},
73 73 "redirect stdout to the null device"),
74 74 'no-stderr' : (
75 75 {'IPKernelApp' : {'no_stderr' : True}},
76 76 "redirect stderr to the null device"),
77 77 'pylab' : (
78 78 {'IPKernelApp' : {'pylab' : 'auto'}},
79 79 """Pre-load matplotlib and numpy for interactive use with
80 80 the default matplotlib backend."""),
81 81 })
82 82
83 83 # inherit flags&aliases for any IPython shell apps
84 84 kernel_aliases.update(shell_aliases)
85 85 kernel_flags.update(shell_flags)
86 86
87 87 # inherit flags&aliases for Sessions
88 88 kernel_aliases.update(session_aliases)
89 89 kernel_flags.update(session_flags)
90 90
91 91 _ctrl_c_message = """\
92 92 NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work.
93 93
94 94 To exit, you will have to explicitly quit this process, by either sending
95 95 "quit" from a client, or using Ctrl-\\ in UNIX-like environments.
96 96
97 97 To read more about this, see https://github.com/ipython/ipython/issues/2049
98 98
99 99 """
100 100
101 101 #-----------------------------------------------------------------------------
102 102 # Application class for starting an IPython Kernel
103 103 #-----------------------------------------------------------------------------
104 104
105 105 class IPKernelApp(BaseIPythonApplication, InteractiveShellApp,
106 106 ConnectionFileMixin):
107 107 name='ipkernel'
108 108 aliases = Dict(kernel_aliases)
109 109 flags = Dict(kernel_flags)
110 110 classes = [Kernel, ZMQInteractiveShell, ProfileDir, Session]
111 111 # the kernel class, as an importstring
112 112 kernel_class = DottedObjectName('IPython.kernel.zmq.ipkernel.Kernel', config=True,
113 113 help="""The Kernel subclass to be used.
114 114
115 115 This should allow easy re-use of the IPKernelApp entry point
116 116 to configure and launch kernels other than IPython's own.
117 117 """)
118 118 kernel = Any()
119 119 poller = Any() # don't restrict this even though current pollers are all Threads
120 120 heartbeat = Instance(Heartbeat)
121 121 session = Instance('IPython.kernel.zmq.session.Session')
122 122 ports = Dict()
123 123
124 124 # ipkernel doesn't get its own config file
125 125 def _config_file_name_default(self):
126 126 return 'ipython_config.py'
127 127
128 128 # inherit config file name from parent:
129 129 parent_appname = Unicode(config=True)
130 130 def _parent_appname_changed(self, name, old, new):
131 131 if self.config_file_specified:
132 132 # it was manually specified, ignore
133 133 return
134 134 self.config_file_name = new.replace('-','_') + u'_config.py'
135 135 # don't let this count as specifying the config file
136 136 self.config_file_specified.remove(self.config_file_name)
137 137
138 138 # connection info:
139 139
140 140 @property
141 141 def abs_connection_file(self):
142 142 if os.path.basename(self.connection_file) == self.connection_file:
143 143 return os.path.join(self.profile_dir.security_dir, self.connection_file)
144 144 else:
145 145 return self.connection_file
146 146
147 147
148 148 # streams, etc.
149 149 no_stdout = Bool(False, config=True, help="redirect stdout to the null device")
150 150 no_stderr = Bool(False, config=True, help="redirect stderr to the null device")
151 151 outstream_class = DottedObjectName('IPython.kernel.zmq.iostream.OutStream',
152 152 config=True, help="The importstring for the OutStream factory")
153 153 displayhook_class = DottedObjectName('IPython.kernel.zmq.displayhook.ZMQDisplayHook',
154 154 config=True, help="The importstring for the DisplayHook factory")
155 155
156 156 # polling
157 157 parent_handle = Integer(0, config=True,
158 158 help="""kill this process if its parent dies. On Windows, the argument
159 159 specifies the HANDLE of the parent process, otherwise it is simply boolean.
160 160 """)
161 161 interrupt = Integer(0, config=True,
162 162 help="""ONLY USED ON WINDOWS
163 163 Interrupt this process when the parent is signaled.
164 164 """)
165 165
166 166 def init_crash_handler(self):
167 167 # Install minimal exception handling
168 168 sys.excepthook = FormattedTB(mode='Verbose', color_scheme='NoColor',
169 169 ostream=sys.__stdout__)
170 170
171 171 def init_poller(self):
172 172 if sys.platform == 'win32':
173 173 if self.interrupt or self.parent_handle:
174 174 self.poller = ParentPollerWindows(self.interrupt, self.parent_handle)
175 175 elif self.parent_handle:
176 176 self.poller = ParentPollerUnix()
177 177
178 178 def _bind_socket(self, s, port):
179 179 iface = '%s://%s' % (self.transport, self.ip)
180 180 if self.transport == 'tcp':
181 181 if port <= 0:
182 182 port = s.bind_to_random_port(iface)
183 183 else:
184 184 s.bind("tcp://%s:%i" % (self.ip, port))
185 185 elif self.transport == 'ipc':
186 186 if port <= 0:
187 187 port = 1
188 188 path = "%s-%i" % (self.ip, port)
189 189 while os.path.exists(path):
190 190 port = port + 1
191 191 path = "%s-%i" % (self.ip, port)
192 192 else:
193 193 path = "%s-%i" % (self.ip, port)
194 194 s.bind("ipc://%s" % path)
195 195 return port
196 196
197 197 def write_connection_file(self):
198 198 """write connection info to JSON file"""
199 199 cf = self.abs_connection_file
200 200 self.log.debug("Writing connection file: %s", cf)
201 201 write_connection_file(cf, ip=self.ip, key=self.session.key, transport=self.transport,
202 202 shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port,
203 203 iopub_port=self.iopub_port, control_port=self.control_port)
204 204
205 205 def cleanup_connection_file(self):
206 206 cf = self.abs_connection_file
207 207 self.log.debug("Cleaning up connection file: %s", cf)
208 208 try:
209 209 os.remove(cf)
210 210 except (IOError, OSError):
211 211 pass
212 212
213 213 self.cleanup_ipc_files()
214 214
    def init_connection_file(self):
        """Locate (or name) the connection file, and load it if it exists.

        If the file cannot be found we own it: register an atexit hook to
        clean it up.  If it exists but cannot be loaded, exit with an error.
        """
        if not self.connection_file:
            # default file name is keyed on our pid
            self.connection_file = "kernel-%s.json"%os.getpid()
        try:
            self.connection_file = filefind(self.connection_file, ['.', self.profile_dir.security_dir])
        except IOError:
            self.log.debug("Connection file not found: %s", self.connection_file)
            # This means I own it, so I will clean it up:
            atexit.register(self.cleanup_connection_file)
            return
        try:
            self.load_connection_file()
        except Exception:
            self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
            self.exit(1)
230 230
231 231 def init_sockets(self):
232 232 # Create a context, a session, and the kernel sockets.
233 233 self.log.info("Starting the kernel at pid: %i", os.getpid())
234 234 context = zmq.Context.instance()
235 235 # Uncomment this to try closing the context.
236 236 # atexit.register(context.term)
237 237
238 238 self.shell_socket = context.socket(zmq.ROUTER)
239 self.shell_socket.linger = 1000
239 240 self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
240 241 self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port)
241 242
242 243 self.iopub_socket = context.socket(zmq.PUB)
244 self.iopub_socket.linger = 1000
243 245 self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
244 246 self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
245 247
246 248 self.stdin_socket = context.socket(zmq.ROUTER)
249 self.stdin_socket.linger = 1000
247 250 self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
248 251 self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port)
249 252
250 253 self.control_socket = context.socket(zmq.ROUTER)
254 self.control_socket.linger = 1000
251 255 self.control_port = self._bind_socket(self.control_socket, self.control_port)
252 256 self.log.debug("control ROUTER Channel on port: %i" % self.control_port)
253 257
    def init_heartbeat(self):
        """start the heart beating"""
        # heartbeat doesn't share context, because it mustn't be blocked
        # by the GIL, which is accessed by libzmq when freeing zero-copy messages
        hb_ctx = zmq.Context()
        self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port))
        # read back the port (the Heartbeat may have chosen one itself)
        self.hb_port = self.heartbeat.port
        self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port)
        self.heartbeat.start()
263 267
264 268 def log_connection_info(self):
265 269 """display connection info, and store ports"""
266 270 basename = os.path.basename(self.connection_file)
267 271 if basename == self.connection_file or \
268 272 os.path.dirname(self.connection_file) == self.profile_dir.security_dir:
269 273 # use shortname
270 274 tail = basename
271 275 if self.profile != 'default':
272 276 tail += " --profile %s" % self.profile
273 277 else:
274 278 tail = self.connection_file
275 279 lines = [
276 280 "To connect another client to this kernel, use:",
277 281 " --existing %s" % tail,
278 282 ]
279 283 # log connection info
280 284 # info-level, so often not shown.
281 285 # frontends should use the %connect_info magic
282 286 # to see the connection info
283 287 for line in lines:
284 288 self.log.info(line)
285 289 # also raw print to the terminal if no parent_handle (`ipython kernel`)
286 290 if not self.parent_handle:
287 291 io.rprint(_ctrl_c_message)
288 292 for line in lines:
289 293 io.rprint(line)
290 294
291 295 self.ports = dict(shell=self.shell_port, iopub=self.iopub_port,
292 296 stdin=self.stdin_port, hb=self.hb_port,
293 297 control=self.control_port)
294 298
    def init_session(self):
        """create our session object"""
        # default_secure: apply secure session defaults to our config first
        default_secure(self.config)
        self.session = Session(parent=self, username=u'kernel')
299 303
300 304 def init_blackhole(self):
301 305 """redirects stdout/stderr to devnull if necessary"""
302 306 if self.no_stdout or self.no_stderr:
303 307 blackhole = open(os.devnull, 'w')
304 308 if self.no_stdout:
305 309 sys.stdout = sys.__stdout__ = blackhole
306 310 if self.no_stderr:
307 311 sys.stderr = sys.__stderr__ = blackhole
308 312
309 313 def init_io(self):
310 314 """Redirect input streams and set a display hook."""
311 315 if self.outstream_class:
312 316 outstream_factory = import_item(str(self.outstream_class))
313 317 sys.stdout = outstream_factory(self.session, self.iopub_socket, u'stdout')
314 318 sys.stderr = outstream_factory(self.session, self.iopub_socket, u'stderr')
315 319 if self.displayhook_class:
316 320 displayhook_factory = import_item(str(self.displayhook_class))
317 321 sys.displayhook = displayhook_factory(self.session, self.iopub_socket)
318 322
    def init_signal(self):
        """Ignore SIGINT so a terminal Ctrl-C does not kill the kernel."""
        signal.signal(signal.SIGINT, signal.SIG_IGN)
321 325
    def init_kernel(self):
        """Create the Kernel object itself"""
        # wrap the raw sockets in ZMQStreams before handing them to the kernel
        shell_stream = ZMQStream(self.shell_socket)
        control_stream = ZMQStream(self.control_socket)

        # kernel_class is configurable; resolve it by dotted name
        kernel_factory = import_item(str(self.kernel_class))

        kernel = kernel_factory(parent=self, session=self.session,
                                shell_streams=[shell_stream, control_stream],
                                iopub_socket=self.iopub_socket,
                                stdin_socket=self.stdin_socket,
                                log=self.log,
                                profile_dir=self.profile_dir,
                                user_ns=self.user_ns,
        )
        # let the kernel know which ports were bound (for connect_info etc.)
        kernel.record_ports(self.ports)
        self.kernel = kernel
339 343
    def init_gui_pylab(self):
        """Enable GUI event loop integration, taking pylab into account."""

        # Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab`
        # to ensure that any exception is printed straight to stderr.
        # Normally _showtraceback associates the reply with an execution,
        # which means frontends will never draw it, as this exception
        # is not associated with any execute request.

        shell = self.shell
        # keep a reference so the original handler can be restored
        _showtraceback = shell._showtraceback
        try:
            # replace pyerr-sending traceback with stderr
            def print_tb(etype, evalue, stb):
                print ("GUI event loop or pylab initialization failed",
                       file=io.stderr)
                print (shell.InteractiveTB.stb2text(stb), file=io.stderr)
            shell._showtraceback = print_tb
            InteractiveShellApp.init_gui_pylab(self)
        finally:
            # always restore the original traceback handler
            shell._showtraceback = _showtraceback
361 365
    def init_shell(self):
        """Expose the kernel's shell and register this app as a configurable."""
        self.shell = self.kernel.shell
        self.shell.configurables.append(self)
365 369
    @catch_config_error
    def initialize(self, argv=None):
        """Initialize the whole kernel application.

        NOTE: ordering matters -- sockets and the heartbeat must exist before
        the connection info is logged/written, and the kernel must exist
        before the shell-level init steps run.
        """
        super(IPKernelApp, self).initialize(argv)
        self.init_blackhole()
        self.init_connection_file()
        self.init_session()
        self.init_poller()
        self.init_sockets()
        self.init_heartbeat()
        # writing/displaying connection info must be *after* init_sockets/heartbeat
        self.log_connection_info()
        self.write_connection_file()
        self.init_io()
        self.init_signal()
        self.init_kernel()
        # shell init steps
        self.init_path()
        self.init_shell()
        self.init_gui_pylab()
        self.init_extensions()
        self.init_code()
        # flush stdout/stderr, so that anything written to these streams during
        # initialization do not get associated with the first execution request
        sys.stdout.flush()
        sys.stderr.flush()
391 395
392 396 def start(self):
393 397 if self.poller is not None:
394 398 self.poller.start()
395 399 self.kernel.start()
396 400 try:
397 401 ioloop.IOLoop.instance().start()
398 402 except KeyboardInterrupt:
399 403 pass
400 404
# Convenience alias used by entry points.
launch_new_instance = IPKernelApp.launch_instance


def main():
    """Run an IPKernel as an application"""
    kernel_app = IPKernelApp.instance()
    kernel_app.initialize()
    kernel_app.start()


if __name__ == '__main__':
    main()
@@ -1,594 +1,625 b''
1 1 # -*- coding: utf-8 -*-
2 2 """IPython Test Process Controller
3 3
4 4 This module runs one or more subprocesses which will actually run the IPython
5 5 test suite.
6 6
7 7 """
8 8
9 #-----------------------------------------------------------------------------
10 # Copyright (C) 2009-2011 The IPython Development Team
11 #
12 # Distributed under the terms of the BSD License. The full license is in
13 # the file COPYING, distributed as part of this software.
14 #-----------------------------------------------------------------------------
15
16 #-----------------------------------------------------------------------------
17 # Imports
18 #-----------------------------------------------------------------------------
9 # Copyright (c) IPython Development Team.
10 # Distributed under the terms of the Modified BSD License.
11
19 12 from __future__ import print_function
20 13
21 14 import argparse
22 15 import json
23 16 import multiprocessing.pool
24 17 import os
25 18 import shutil
26 19 import signal
27 20 import sys
28 21 import subprocess
29 22 import time
30 23
31 24 from .iptest import have, test_group_names as py_test_group_names, test_sections, StreamCapturer
32 25 from IPython.utils.path import compress_user
33 26 from IPython.utils.py3compat import bytes_to_str
34 27 from IPython.utils.sysinfo import get_sys_info
35 28 from IPython.utils.tempdir import TemporaryDirectory
36 29
try:
    # Python >= 3.3
    from subprocess import TimeoutExpired
    def popen_wait(p, timeout):
        # native Popen.wait grew a timeout parameter in 3.3
        return p.wait(timeout)
except ImportError:
    class TimeoutExpired(Exception):
        # stand-in for subprocess.TimeoutExpired on older Pythons
        pass
    def popen_wait(p, timeout):
        """backport of Popen.wait from Python 3"""
        # poll every 100ms for up to `timeout` seconds
        for i in range(int(10 * timeout)):
            if p.poll() is not None:
                return
            time.sleep(0.1)
        if p.poll() is None:
            raise TimeoutExpired

# seconds to wait for the notebook server to shut down before killing it
NOTEBOOK_SHUTDOWN_TIMEOUT = 10
37 48
class TestController(object):
    """Run one group of tests in a subprocess."""
    #: str, IPython test suite to be executed.
    section = None
    #: list, command line arguments to be executed
    cmd = None
    #: dict, extra environment variables to set for the subprocess
    env = None
    #: list, TemporaryDirectory instances to clear up when the process finishes
    dirs = None
    #: subprocess.Popen instance
    process = None
    #: str, process stdout+stderr
    stdout = None

    def __init__(self):
        self.cmd = []
        self.env = {}
        self.dirs = []

    def setup(self):
        """Create temporary directories etc.

        This is only called when we know the test group will be run. Things
        created here may be cleaned up by self.cleanup().
        """
        pass

    def launch(self, buffer_output=False):
        """Start the test subprocess, optionally capturing its output."""
        child_env = os.environ.copy()
        child_env.update(self.env)
        if buffer_output:
            out, err = subprocess.PIPE, subprocess.STDOUT
        else:
            out = err = None
        self.process = subprocess.Popen(self.cmd, stdout=out, stderr=err,
                                        env=child_env)

    def wait(self):
        """Block until the subprocess exits; return its exit code."""
        self.stdout, _ = self.process.communicate()
        return self.process.returncode

    def print_extra_info(self):
        """Print extra information about this test run.

        If we're running in parallel and showing the concise view, this is only
        called if the test group fails. Otherwise, it's called before the test
        group is started.

        The base implementation does nothing, but it can be overridden by
        subclasses.
        """
        return

    def cleanup_process(self):
        """Cleanup on exit by killing any leftover processes."""
        subp = self.process
        if subp is None or subp.poll() is not None:
            # Process doesn't exist, or is already dead.
            return

        try:
            print('Cleaning up stale PID: %d' % subp.pid)
            subp.kill()
        except:  # (OSError, WindowsError) ?
            # This is just a best effort, if we fail or the process was
            # really gone, ignore it.
            pass
        else:
            # give it up to ~1s to actually die
            for _ in range(10):
                if subp.poll() is not None:
                    break
                time.sleep(0.1)

        if subp.poll() is None:
            # The process did not die...
            print('... failed. Manual cleanup may be required.')

    def cleanup(self):
        """Kill process if it's still alive, and clean up temporary directories"""
        self.cleanup_process()
        for tmpdir in self.dirs:
            tmpdir.cleanup()

    __del__ = cleanup
124 135
class PyTestController(TestController):
    """Run Python tests using IPython.testing.iptest"""
    #: str, Python command to execute in subprocess
    pycmd = None

    def __init__(self, section, options):
        """Create new test runner."""
        TestController.__init__(self)
        self.section = section
        # pycmd is put into cmd[2] in PyTestController.launch()
        self.cmd = [sys.executable, '-c', None, section]
        self.pycmd = "from IPython.testing.iptest import run_iptest; run_iptest()"
        self.options = options

    def setup(self):
        """Prepare isolated config/working directories and apply CLI options."""
        ipydir = TemporaryDirectory()
        workingdir = TemporaryDirectory()
        self.workingdir = workingdir
        self.dirs.extend([ipydir, workingdir])
        self.env['IPYTHONDIR'] = ipydir.name
        self.env['IPTEST_WORKING_DIR'] = workingdir.name
        # This means we won't get odd effects from our own matplotlib config
        self.env['MPLCONFIGDIR'] = workingdir.name

        # From options:
        opts = self.options
        if opts.xunit:
            self.add_xunit()
        if opts.coverage:
            self.add_coverage()
        self.env['IPTEST_SUBPROC_STREAMS'] = opts.subproc_streams
        self.cmd.extend(opts.extra_args)

    @property
    def will_run(self):
        """Whether this section is enabled (unknown sections default to True)."""
        try:
            section = test_sections[self.section]
        except KeyError:
            return True
        return section.will_run

    def add_xunit(self):
        """Ask nose for Xunit XML output into <section>.xunit.xml."""
        report_path = os.path.abspath(self.section + '.xunit.xml')
        self.cmd.extend(['--with-xunit', '--xunit-file', report_path])

    def add_coverage(self):
        """Enable coverage measurement in the subprocess via a .coveragerc."""
        try:
            sources = test_sections[self.section].includes
        except KeyError:
            sources = ['IPython']

        coverage_rc = ("[run]\n"
                       "data_file = {data_file}\n"
                       "source =\n"
                       "  {source}\n"
                       ).format(data_file=os.path.abspath('.coverage.'+self.section),
                                source="\n  ".join(sources))
        config_file = os.path.join(self.workingdir.name, '.coveragerc')
        with open(config_file, 'w') as f:
            f.write(coverage_rc)

        self.env['COVERAGE_PROCESS_START'] = config_file
        self.pycmd = "import coverage; coverage.process_startup(); " + self.pycmd

    def launch(self, buffer_output=False):
        """Insert pycmd into the command line, then launch as usual."""
        self.cmd[2] = self.pycmd
        super(PyTestController, self).launch(buffer_output=buffer_output)
190 201
# Prefix distinguishing JS test group names from Python ones.
js_prefix = 'js/'

def get_js_test_dir():
    """Return the IPython.html.tests directory, with a trailing separator."""
    import IPython.html.tests as t
    return os.path.join(os.path.dirname(t.__file__), '')

def all_js_groups():
    """List every JS test group (one per subdirectory), minus __pycache__."""
    import glob
    test_dir = get_js_test_dir()
    groups = []
    for subdir in glob.glob(test_dir + '*/'):
        rel = os.path.relpath(subdir, test_dir)
        if rel != '__pycache__':
            groups.append(js_prefix + rel)
    return groups
202 213
class JSController(TestController):
    """Run CasperJS tests """
    def __init__(self, section):
        """Create new test runner."""
        TestController.__init__(self)
        self.section = section
        js_test_dir = get_js_test_dir()
        # shared casper utilities, injected into every test run
        includes = '--includes=' + os.path.join(js_test_dir,'util.js')
        test_cases = os.path.join(js_test_dir, self.section[len(js_prefix):])
        self.cmd = ['casperjs', 'test', includes, test_cases]

    def setup(self):
        """Create notebook/ipython dirs and start the notebook server."""
        self.ipydir = TemporaryDirectory()
        self.nbdir = TemporaryDirectory()
        self.dirs.append(self.ipydir)
        self.dirs.append(self.nbdir)
        # nested non-ASCII directories exercise unicode path handling
        os.makedirs(os.path.join(self.nbdir.name, os.path.join(u'sub βˆ‚ir1', u'sub βˆ‚ir 1a')))
        os.makedirs(os.path.join(self.nbdir.name, os.path.join(u'sub βˆ‚ir2', u'sub βˆ‚ir 1b')))

        # start the ipython notebook, so we get the port number
        self.server_port = 0
        self._init_server()
        if self.server_port:
            self.cmd.append("--port=%i" % self.server_port)
        else:
            # don't launch tests if the server didn't start
            self.cmd = [sys.executable, '-c', 'raise SystemExit(1)']

    def print_extra_info(self):
        print("Running tests with notebook directory %r" % self.nbdir.name)

    @property
    def will_run(self):
        # casperjs plus the notebook server's Python dependencies
        return all(have[a] for a in ['zmq', 'tornado', 'jinja2', 'casperjs', 'sqlite3'])

    def _init_server(self):
        "Start the notebook server in a separate process"
        self.server_command = command = [sys.executable,
            '-m', 'IPython.html',
            '--no-browser',
            '--ipython-dir', self.ipydir.name,
            '--notebook-dir', self.nbdir.name,
        ]
        # ipc doesn't work on Windows, and darwin has crazy-long temp paths,
        # which run afoul of ipc's maximum path length.
        if sys.platform.startswith('linux'):
            command.append('--KernelManager.transport=ipc')
        self.stream_capturer = c = StreamCapturer()
        c.start()
        self.server = subprocess.Popen(command, stdout=c.writefd, stderr=subprocess.STDOUT)
        # the server writes its connection info here once it is up
        self.server_info_file = os.path.join(self.ipydir.name,
            'profile_default', 'security', 'nbserver-%i.json' % self.server.pid
        )
        self._wait_for_server()

    def _wait_for_server(self):
        """Wait 30 seconds for the notebook server to start"""
        for i in range(300):
            if self.server.poll() is not None:
                # server died before writing its info file
                return self._failed_to_start()
            if os.path.exists(self.server_info_file):
                self._load_server_info()
                return
            time.sleep(0.1)
        print("Notebook server-info file never arrived: %s" % self.server_info_file,
            file=sys.stderr
        )

    def _failed_to_start(self):
        """Notebook server exited prematurely"""
        captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')
        print("Notebook failed to start: ", file=sys.stderr)
        print(self.server_command)
        print(captured, file=sys.stderr)

    def _load_server_info(self):
        """Notebook server started, load connection info from JSON"""
        with open(self.server_info_file) as f:
            info = json.load(f)
        self.server_port = info['port']

    def cleanup(self):
        """Terminate (then kill) the notebook server, and clean up dirs."""
        try:
            self.server.terminate()
        except OSError:
            # already dead
            pass
        # wait 10s for the server to shutdown
        try:
            popen_wait(self.server, NOTEBOOK_SHUTDOWN_TIMEOUT)
        except TimeoutExpired:
            # server didn't terminate, kill it
            try:
                print("Failed to terminate notebook server, killing it.",
                    file=sys.stderr
                )
                self.server.kill()
            except OSError:
                # already dead
                pass
            # wait another 10s
            try:
                popen_wait(self.server, NOTEBOOK_SHUTDOWN_TIMEOUT)
            except TimeoutExpired:
                print("Notebook server still running (%s)" % self.server_info_file,
                    file=sys.stderr
                )

        self.stream_capturer.halt()
        TestController.cleanup(self)
293 324
294 325
def prepare_controllers(options):
    """Returns two lists of TestController instances, those to run, and those
    not to run."""
    testgroups = options.testgroups

    if testgroups:
        # explicit groups: split python sections from JS ones
        py_testgroups = [g for g in testgroups
                         if g in py_test_group_names or g.startswith('IPython.')]
        if 'js' in testgroups:
            js_testgroups = all_js_groups()
        else:
            js_testgroups = [g for g in testgroups if g not in py_testgroups]
    else:
        py_testgroups = py_test_group_names
        if options.all:
            js_testgroups = all_js_groups()
        else:
            js_testgroups = []
            test_sections['parallel'].enabled = False

    controllers = ([PyTestController(name, options) for name in py_testgroups]
                   + [JSController(name) for name in js_testgroups])

    to_run = [c for c in controllers if c.will_run]
    not_run = [c for c in controllers if not c.will_run]
    return to_run, not_run
322 353
def do_run(controller, buffer_output=True):
    """Setup and run a test controller.

    If buffer_output is True, no output is displayed, to avoid it appearing
    interleaved. In this case, the caller is responsible for displaying test
    output on failure.

    Returns
    -------
    controller : TestController
        The same controller as passed in, as a convenience for using map() type
        APIs.
    exitcode : int
        The exit code of the test subprocess. Non-zero indicates failure.
    """
    try:
        try:
            controller.setup()
            if not buffer_output:
                controller.print_extra_info()
            controller.launch(buffer_output=buffer_output)
        except Exception:
            import traceback
            traceback.print_exc()
            # any setup/launch failure is reported as a generic failure code
            return controller, 1
        return controller, controller.wait()
    except KeyboardInterrupt:
        # report interruption as the conventional negative-signal exit code
        return controller, -signal.SIGINT
    finally:
        # always release temp dirs / kill stragglers, success or not
        controller.cleanup()
356 387
def report():
    """Return a string with a summary report of test-related variables."""
    inf = get_sys_info()
    out = []
    # collect (label, value) pairs so labels can be aligned afterwards
    def _add(name, value):
        out.append((name, value))

    _add('IPython version', inf['ipython_version'])
    _add('IPython commit', "{} ({})".format(inf['commit_hash'], inf['commit_source']))
    _add('IPython package', compress_user(inf['ipython_path']))
    _add('Python version', inf['sys_version'].replace('\n',''))
    _add('sys.executable', compress_user(inf['sys_executable']))
    _add('Platform', inf['platform'])

    # pad every label to the widest one
    width = max(len(n) for (n,v) in out)
    out = ["{:<{width}}: {}\n".format(n, v, width=width) for (n,v) in out]

    avail = []
    not_avail = []

    # `have` maps tool/library name -> availability flag
    for k, is_avail in have.items():
        if is_avail:
            avail.append(k)
        else:
            not_avail.append(k)

    if avail:
        out.append('\nTools and libraries available at test time:\n')
        avail.sort()
        out.append(' ' + ' '.join(avail)+'\n')

    if not_avail:
        out.append('\nTools and libraries NOT available at test time:\n')
        not_avail.sort()
        out.append(' ' + ' '.join(not_avail)+'\n')

    return ''.join(out)
394 425
def run_iptestall(options):
    """Run the entire IPython test suite by calling nose and trial.

    This function constructs :class:`IPTester` instances for all IPython
    modules and package and then runs each of them. This causes the modules
    and packages of IPython to be tested each in their own subprocess using
    nose.

    Parameters
    ----------

    All parameters are passed as attributes of the options object.

    testgroups : list of str
      Run only these sections of the test suite. If empty, run all the available
      sections.

    fast : int or None
      Run the test suite in parallel, using n simultaneous processes. If None
      is passed, one process is used per CPU core. Default 1 (i.e. sequential)

    inc_slow : bool
      Include slow tests, like IPython.parallel. By default, these tests aren't
      run.

    xunit : bool
      Produce Xunit XML output. This is written to multiple foo.xunit.xml files.

    coverage : bool or str
      Measure code coverage from tests. True will store the raw coverage data,
      or pass 'html' or 'xml' to get reports.

    extra_args : list
      Extra arguments to pass to the test subprocesses, e.g. '-v'
    """
    to_run, not_run = prepare_controllers(options)

    # format "label ----- STATUS" lines of a fixed total width
    def justify(ltext, rtext, width=70, fill='-'):
        ltext += ' '
        rtext = (' ' + rtext).rjust(width - len(ltext), fill)
        return ltext + rtext

    # Run all test runners, tracking execution time
    failed = []
    t_start = time.time()

    print()
    if options.fast == 1:
        # This actually means sequential, i.e. with 1 job
        for controller in to_run:
            print('Test group:', controller.section)
            sys.stdout.flush()  # Show in correct order when output is piped
            controller, res = do_run(controller, buffer_output=False)
            if res:
                failed.append(controller)
                if res == -signal.SIGINT:
                    print("Interrupted")
                    break
            print()

    else:
        # Run tests concurrently
        try:
            pool = multiprocessing.pool.ThreadPool(options.fast)
            for (controller, res) in pool.imap_unordered(do_run, to_run):
                res_string = 'OK' if res == 0 else 'FAILED'
                print(justify('Test group: ' + controller.section, res_string))
                if res:
                    # buffered output is only shown for failing groups
                    controller.print_extra_info()
                    print(bytes_to_str(controller.stdout))
                    failed.append(controller)
                    if res == -signal.SIGINT:
                        print("Interrupted")
                        break
        except KeyboardInterrupt:
            return

    for controller in not_run:
        print(justify('Test group: ' + controller.section, 'NOT RUN'))

    t_end = time.time()
    t_tests = t_end - t_start
    nrunners = len(to_run)
    nfail = len(failed)
    # summarize results
    print('_'*70)
    print('Test suite completed for system with the following information:')
    print(report())
    took = "Took %.3fs." % t_tests
    print('Status: ', end='')
    if not failed:
        print('OK (%d test groups).' % nrunners, took)
    else:
        # If anything went wrong, point out what command to rerun manually to
        # see the actual errors and individual summary
        failed_sections = [c.section for c in failed]
        print('ERROR - {} out of {} test groups failed ({}).'.format(nfail,
                                  nrunners, ', '.join(failed_sections)), took)
        print()
        print('You may wish to rerun these, with:')
        print('  iptest', *failed_sections)
        print()

    if options.coverage:
        from coverage import coverage
        cov = coverage(data_file='.coverage')
        cov.combine()
        cov.save()

        # Coverage HTML report
        if options.coverage == 'html':
            html_dir = 'ipy_htmlcov'
            shutil.rmtree(html_dir, ignore_errors=True)
            print("Writing HTML coverage report to %s/ ... " % html_dir, end="")
            sys.stdout.flush()

            # Custom HTML reporter to clean up module names.
            from coverage.html import HtmlReporter
            class CustomHtmlReporter(HtmlReporter):
                def find_code_units(self, morfs):
                    super(CustomHtmlReporter, self).find_code_units(morfs)
                    for cu in self.code_units:
                        nameparts = cu.name.split(os.sep)
                        if 'IPython' not in nameparts:
                            continue
                        ix = nameparts.index('IPython')
                        cu.name = '.'.join(nameparts[ix:])

            # Reimplement the html_report method with our custom reporter
            cov._harvest_data()
            cov.config.from_args(omit='*{0}tests{0}*'.format(os.sep), html_dir=html_dir,
                                 html_title='IPython test coverage',
                                 )
            reporter = CustomHtmlReporter(cov, cov.config)
            reporter.report(None)
            print('done.')

        # Coverage XML report
        elif options.coverage == 'xml':
            cov.xml_report(outfile='ipy_coverage.xml')

    if failed:
        # Ensure that our exit code indicates failure
        sys.exit(1)
539 570
# Command-line interface for iptest, built from a declarative spec.
argparser = argparse.ArgumentParser(description='Run IPython test suite')
_cli_args = [
    (('testgroups',),
     dict(nargs='*',
          help='Run specified groups of tests. If omitted, run all tests.')),
    (('--all',),
     dict(action='store_true',
          help='Include slow tests not run by default.')),
    (('-j', '--fast'),
     dict(nargs='?', const=None, default=1, type=int,
          help='Run test sections in parallel. This starts as many '
               'processes as you have cores, or you can specify a number.')),
    (('--xunit',),
     dict(action='store_true',
          help='Produce Xunit XML results')),
    (('--coverage',),
     dict(nargs='?', const=True, default=False,
          help="Measure test coverage. Specify 'html' or 'xml' to get reports.")),
    (('--subproc-streams',),
     dict(default='capture',
          help="What to do with stdout/stderr from subprocesses. "
               "'capture' (default), 'show' and 'discard' are the options.")),
]
for _names, _opts in _cli_args:
    argparser.add_argument(*_names, **_opts)


def default_options():
    """Get an argparse Namespace object with the default arguments, to pass to
    :func:`run_iptestall`.
    """
    options = argparser.parse_args([])
    options.extra_args = []
    return options
565 596
def main():
    """Parse the command line and run the requested test groups."""
    # iptest doesn't work correctly if the working directory is the
    # root of the IPython source tree. Tell the user to avoid
    # frustration.
    marker = os.path.join(os.getcwd(), 'IPython', 'testing', '__main__.py')
    if os.path.exists(marker):
        print("Don't run iptest from the IPython source directory",
              file=sys.stderr)
        sys.exit(1)
    # Arguments after -- should be passed through to nose. Argparse treats
    # everything after -- as regular positional arguments, so we separate them
    # first.
    argv = sys.argv[1:]
    if '--' in argv:
        split_at = argv.index('--')
        to_parse, extra_args = argv[:split_at], argv[split_at + 1:]
    else:
        to_parse, extra_args = argv, []

    options = argparser.parse_args(to_parse)
    options.extra_args = extra_args

    run_iptestall(options)


if __name__ == '__main__':
    main()
General Comments 0
You need to be logged in to leave comments. Login now