finish kernel shims
Min RK
@@ -0,0 +1,226 @@
1 """Utilities for launching kernels
2 """
3
4 # Copyright (c) IPython Development Team.
5 # Distributed under the terms of the Modified BSD License.
6
7 import os
8 import sys
9 from subprocess import Popen, PIPE
10
11 from IPython.utils.encoding import getdefaultencoding
12 from IPython.utils.py3compat import cast_bytes_py2
13
14
15 def swallow_argv(argv, aliases=None, flags=None):
16 """strip frontend-specific aliases and flags from an argument list
17
18 For use primarily in frontend apps that want to pass a subset of command-line
19 arguments through to a subprocess, where frontend-specific flags and aliases
20 should be removed from the list.
21
22 Parameters
23 ----------
24
25 argv : list(str)
26 The starting argv, to be filtered
27 aliases : container of aliases (dict, list, set, etc.)
28 The frontend-specific aliases to be removed
29 flags : container of flags (dict, list, set, etc.)
30 The frontend-specific flags to be removed
31
32 Returns
33 -------
34
35 argv : list(str)
36 The argv list, excluding flags and aliases that have been stripped
37 """
38
39 if aliases is None:
40 aliases = set()
41 if flags is None:
42 flags = set()
43
44 stripped = list(argv) # copy
45
46 swallow_next = False
47 was_flag = False
48 for a in argv:
49 if a == '--':
50 break
51 if swallow_next:
52 swallow_next = False
53 # last arg was an alias, remove the next one
54 # *unless* the last alias has a no-arg flag version, in which
55 # case, don't swallow the next arg if it's also a flag:
56 if not (was_flag and a.startswith('-')):
57 stripped.remove(a)
58 continue
59 if a.startswith('-'):
60 split = a.lstrip('-').split('=')
61 name = split[0]
62 # we use startswith because argparse accepts any arg to be specified
63 # by any leading section, as long as it is unique,
64 # so `--no-br` means `--no-browser` in the notebook, etc.
65 if any(alias.startswith(name) for alias in aliases):
66 stripped.remove(a)
67 if len(split) == 1:
68 # alias passed with arg via space
69 swallow_next = True
70 # could have been a flag that matches an alias, e.g. `existing`
71 # in which case, we might not swallow the next arg
72 was_flag = name in flags
73 elif len(split) == 1 and any(flag.startswith(name) for flag in flags):
74 # strip flag, but don't swallow next, as flags don't take args
75 stripped.remove(a)
76
77 # return shortened list
78 return stripped
79
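As an illustration, here is a minimal, hedged sketch of how a frontend might use `swallow_argv` (defined above) to scrub its own options before handing the rest to a kernel; the alias and flag names are made-up examples, not any real frontend's configuration:

```python
# Hypothetical frontend-specific option names (examples only).
frontend_aliases = {'existing', 'ip'}
frontend_flags = {'existing', 'no-confirm-exit'}

argv = ['--existing', 'kernel-1234.json', '--ip=127.0.0.1',
        '--no-confirm-exit', '--debug']

# '--existing kernel-1234.json' and '--ip=...' are aliases, '--no-confirm-exit'
# is a flag; '--debug' is not frontend-specific, so it survives for the kernel.
kernel_argv = swallow_argv(argv, aliases=frontend_aliases, flags=frontend_flags)
print(kernel_argv)  # ['--debug']
```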
80
81 def make_ipkernel_cmd(mod='ipython_kernel', executable=None, extra_arguments=[], **kw):
82 """Build Popen command list for launching an IPython kernel.
83
84 Parameters
85 ----------
86 mod : str, optional (default 'ipython_kernel')
87 The name of an IPython module whose __main__ starts an IPython kernel
88
89 executable : str, optional (default sys.executable)
90 The Python executable to use for the kernel process.
91
92 extra_arguments : list, optional
93 A list of extra arguments to pass when executing the launch code.
94
95 Returns
96 -------
97
98 A Popen command list
99 """
100 if executable is None:
101 executable = sys.executable
102 arguments = [ executable, '-m', mod, '-f', '{connection_file}' ]
103 arguments.extend(extra_arguments)
104
105 return arguments
106
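A quick sketch of the output (assuming the function above is in scope): the result is a plain argument list with a `{connection_file}` placeholder that the caller is expected to fill in before launching:

```python
cmd = make_ipkernel_cmd(extra_arguments=['--debug'])
print(cmd)
# e.g. ['/usr/bin/python', '-m', 'ipython_kernel', '-f', '{connection_file}', '--debug']
# (the first element is sys.executable, so it varies with the interpreter)
```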
107
108 def launch_kernel(cmd, stdin=None, stdout=None, stderr=None, env=None,
109 independent=False,
110 cwd=None,
111 **kw
112 ):
113 """ Launches a localhost kernel, binding to the specified ports.
114
115 Parameters
116 ----------
117 cmd : Popen list,
118 A command list (as for subprocess.Popen) that imports and executes a kernel entry point.
119
120 stdin, stdout, stderr : optional (default None)
121 Standard streams, as defined in subprocess.Popen.
122
123 independent : bool, optional (default False)
124 If set, the kernel process is guaranteed to survive if this process
125 dies. If not set, an effort is made to ensure that the kernel is killed
126 when this process dies. Note that in this case it is still good practice
127 to kill kernels manually before exiting.
128
129 cwd : path, optional
130 The working dir of the kernel process (default: cwd of this process).
131
132 Returns
133 -------
134
135 Popen instance for the kernel subprocess
136 """
137
138 # Popen will fail (sometimes with a deadlock) if stdin, stdout, and stderr
139 # are invalid. Unfortunately, there is in general no way to detect whether
140 # they are valid. The following two blocks redirect them to (temporary)
141 # pipes in certain important cases.
142
143 # If this process has been backgrounded, our stdin is invalid. Since there
144 # is no compelling reason for the kernel to inherit our stdin anyway, we'll
145 # play it safe and always redirect.
146 redirect_in = True
147 _stdin = PIPE if stdin is None else stdin
148
149 # If this process is running on pythonw, we know that stdin, stdout, and
150 # stderr are all invalid.
151 redirect_out = sys.executable.endswith('pythonw.exe')
152 if redirect_out:
153 blackhole = open(os.devnull, 'w')
154 _stdout = blackhole if stdout is None else stdout
155 _stderr = blackhole if stderr is None else stderr
156 else:
157 _stdout, _stderr = stdout, stderr
158
159 env = env if (env is not None) else os.environ.copy()
160
161 encoding = getdefaultencoding(prefer_stream=False)
162 kwargs = dict(
163 stdin=_stdin,
164 stdout=_stdout,
165 stderr=_stderr,
166 cwd=cwd,
167 env=env,
168 )
169
170 # Spawn a kernel.
171 if sys.platform == 'win32':
172 # Popen on Python 2 on Windows cannot handle unicode args or cwd
173 cmd = [ cast_bytes_py2(c, encoding) for c in cmd ]
174 if cwd:
175 cwd = cast_bytes_py2(cwd, sys.getfilesystemencoding() or 'ascii')
176 kwargs['cwd'] = cwd
177
178 from jupyter_client.parentpoller import ParentPollerWindows
179 # Create a Win32 event for interrupting the kernel
180 # and store it in an environment variable.
181 interrupt_event = ParentPollerWindows.create_interrupt_event()
182 env["JPY_INTERRUPT_EVENT"] = str(interrupt_event)
183 # deprecated old env name:
184 env["IPY_INTERRUPT_EVENT"] = env["JPY_INTERRUPT_EVENT"]
185
186 try:
187 from _winapi import DuplicateHandle, GetCurrentProcess, \
188 DUPLICATE_SAME_ACCESS, CREATE_NEW_PROCESS_GROUP
189 except ImportError:
190 from _subprocess import DuplicateHandle, GetCurrentProcess, \
191 DUPLICATE_SAME_ACCESS, CREATE_NEW_PROCESS_GROUP
192 # Launch the kernel process
193 if independent:
194 kwargs['creationflags'] = CREATE_NEW_PROCESS_GROUP
195 else:
196 pid = GetCurrentProcess()
197 handle = DuplicateHandle(pid, pid, pid, 0,
198 True, # Inheritable by new processes.
199 DUPLICATE_SAME_ACCESS)
200 env['JPY_PARENT_PID'] = str(int(handle))
201
202 proc = Popen(cmd, **kwargs)
203
204 # Attach the interrupt event to the Popen object so it can be used later.
205 proc.win32_interrupt_event = interrupt_event
206
207 else:
208 if independent:
209 kwargs['preexec_fn'] = lambda: os.setsid()
210 else:
211 env['JPY_PARENT_PID'] = str(os.getpid())
212
213 proc = Popen(cmd, **kwargs)
214
215 # Clean up pipes created to work around Popen bug.
216 if redirect_in:
217 if stdin is None:
218 proc.stdin.close()
219
220 return proc
221
222 __all__ = [
223 'swallow_argv',
224 'make_ipkernel_cmd',
225 'launch_kernel',
226 ]
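Putting the helpers together, here is a rough, hedged sketch of launching a kernel by hand (normally a KernelManager does this; the connection file path below is arbitrary, and the kernel process writes its connection info there):

```python
import os

connection_file = os.path.abspath('kernel-demo.json')

# Build the command and substitute the {connection_file} placeholder.
cmd = make_ipkernel_cmd()
cmd = [arg.format(connection_file=connection_file) for arg in cmd]

# Start the kernel; this assumes the ipython_kernel package is installed.
proc = launch_kernel(cmd)
print("kernel pid:", proc.pid)

# ... a client could now connect using the info in connection_file ...

proc.terminate()
```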
@@ -1,330 +1,331 @@
1 """ A minimal application base mixin for all ZMQ based IPython frontends.
1 """ A minimal application base mixin for all ZMQ based IPython frontends.
2
2
3 This is not a complete console app, as subprocesses will not be able to receive
3 This is not a complete console app, as subprocesses will not be able to receive
4 input, and there is no real readline support, among other limitations. This is a
4 input, and there is no real readline support, among other limitations. This is a
5 refactoring of what used to be the IPython/qt/console/qtconsoleapp.py
5 refactoring of what used to be the IPython/qt/console/qtconsoleapp.py
6 """
6 """
7 # Copyright (c) IPython Development Team.
7 # Copyright (c) IPython Development Team.
8 # Distributed under the terms of the Modified BSD License.
8 # Distributed under the terms of the Modified BSD License.
9
9
10 import atexit
10 import atexit
11 import os
11 import os
12 import signal
12 import signal
13 import sys
13 import sys
14 import uuid
14 import uuid
15
15
16
16
17 from IPython.config.application import boolean_flag
17 from IPython.config.application import boolean_flag
18 from IPython.core.profiledir import ProfileDir
18 from IPython.core.profiledir import ProfileDir
19 from IPython.kernel.blocking import BlockingKernelClient
19 from IPython.kernel.blocking import BlockingKernelClient
20 from IPython.kernel import KernelManager
20 from IPython.kernel import KernelManager
21 from IPython.kernel import tunnel_to_kernel, find_connection_file
21 from IPython.kernel import tunnel_to_kernel, find_connection_file
22 from IPython.kernel.kernelspec import NoSuchKernel
22 from IPython.kernel.kernelspec import NoSuchKernel
23 from IPython.utils.path import filefind
23 from IPython.utils.path import filefind
24 from IPython.utils.traitlets import (
24 from IPython.utils.traitlets import (
25 Dict, List, Unicode, CUnicode, CBool, Any
25 Dict, List, Unicode, CUnicode, CBool, Any
26 )
26 )
27 from IPython.kernel.zmq.session import Session
27 from IPython.kernel.zmq.session import Session
28 from IPython.kernel.connect import ConnectionFileMixin
28 from IPython.kernel import connect
29 ConnectionFileMixin = connect.ConnectionFileMixin
29
30
30 from IPython.utils.localinterfaces import localhost
31 from IPython.utils.localinterfaces import localhost
31
32
32 #-----------------------------------------------------------------------------
33 #-----------------------------------------------------------------------------
33 # Aliases and Flags
34 # Aliases and Flags
34 #-----------------------------------------------------------------------------
35 #-----------------------------------------------------------------------------
35
36
36 flags = {}
37 flags = {}
37
38
38 # the flags that are specific to the frontend
39 # the flags that are specific to the frontend
39 # these must be scrubbed before being passed to the kernel,
40 # these must be scrubbed before being passed to the kernel,
40 # or it will raise an error on unrecognized flags
41 # or it will raise an error on unrecognized flags
41 app_flags = {
42 app_flags = {
42 'existing' : ({'IPythonConsoleApp' : {'existing' : 'kernel*.json'}},
43 'existing' : ({'IPythonConsoleApp' : {'existing' : 'kernel*.json'}},
43 "Connect to an existing kernel. If no argument specified, guess most recent"),
44 "Connect to an existing kernel. If no argument specified, guess most recent"),
44 }
45 }
45 app_flags.update(boolean_flag(
46 app_flags.update(boolean_flag(
46 'confirm-exit', 'IPythonConsoleApp.confirm_exit',
47 'confirm-exit', 'IPythonConsoleApp.confirm_exit',
47 """Set to display confirmation dialog on exit. You can always use 'exit' or 'quit',
48 """Set to display confirmation dialog on exit. You can always use 'exit' or 'quit',
48 to force a direct exit without any confirmation.
49 to force a direct exit without any confirmation.
49 """,
50 """,
50 """Don't prompt the user when exiting. This will terminate the kernel
51 """Don't prompt the user when exiting. This will terminate the kernel
51 if it is owned by the frontend, and leave it alive if it is external.
52 if it is owned by the frontend, and leave it alive if it is external.
52 """
53 """
53 ))
54 ))
54 flags.update(app_flags)
55 flags.update(app_flags)
55
56
56 aliases = {}
57 aliases = {}
57
58
58 # also scrub aliases from the frontend
59 # also scrub aliases from the frontend
59 app_aliases = dict(
60 app_aliases = dict(
60 ip = 'IPythonConsoleApp.ip',
61 ip = 'IPythonConsoleApp.ip',
61 transport = 'IPythonConsoleApp.transport',
62 transport = 'IPythonConsoleApp.transport',
62 hb = 'IPythonConsoleApp.hb_port',
63 hb = 'IPythonConsoleApp.hb_port',
63 shell = 'IPythonConsoleApp.shell_port',
64 shell = 'IPythonConsoleApp.shell_port',
64 iopub = 'IPythonConsoleApp.iopub_port',
65 iopub = 'IPythonConsoleApp.iopub_port',
65 stdin = 'IPythonConsoleApp.stdin_port',
66 stdin = 'IPythonConsoleApp.stdin_port',
66 existing = 'IPythonConsoleApp.existing',
67 existing = 'IPythonConsoleApp.existing',
67 f = 'IPythonConsoleApp.connection_file',
68 f = 'IPythonConsoleApp.connection_file',
68
69
69 kernel = 'IPythonConsoleApp.kernel_name',
70 kernel = 'IPythonConsoleApp.kernel_name',
70
71
71 ssh = 'IPythonConsoleApp.sshserver',
72 ssh = 'IPythonConsoleApp.sshserver',
72 )
73 )
73 aliases.update(app_aliases)
74 aliases.update(app_aliases)
74
75
75 #-----------------------------------------------------------------------------
76 #-----------------------------------------------------------------------------
76 # Classes
77 # Classes
77 #-----------------------------------------------------------------------------
78 #-----------------------------------------------------------------------------
78
79
79 classes = [KernelManager, ProfileDir, Session]
80 classes = [KernelManager, ProfileDir, Session]
80
81
81 class IPythonConsoleApp(ConnectionFileMixin):
82 class IPythonConsoleApp(ConnectionFileMixin):
82 name = 'ipython-console-mixin'
83 name = 'ipython-console-mixin'
83
84
84 description = """
85 description = """
85 The IPython Mixin Console.
86 The IPython Mixin Console.
86
87
87 This class contains the common portions of console client (QtConsole,
88 This class contains the common portions of console client (QtConsole,
88 ZMQ-based terminal console, etc). It is not a full console, in that
89 ZMQ-based terminal console, etc). It is not a full console, in that
89 launched terminal subprocesses will not be able to accept input.
90 launched terminal subprocesses will not be able to accept input.
90
91
91 The Console using this mixin supports various extra features beyond
92 The Console using this mixin supports various extra features beyond
92 the single-process Terminal IPython shell, such as connecting to
93 the single-process Terminal IPython shell, such as connecting to
93 an existing kernel, via:
94 an existing kernel, via:
94
95
95 ipython <appname> --existing
96 ipython <appname> --existing
96
97
97 as well as tunneling via SSH
98 as well as tunneling via SSH
98
99
99 """
100 """
100
101
101 classes = classes
102 classes = classes
102 flags = Dict(flags)
103 flags = Dict(flags)
103 aliases = Dict(aliases)
104 aliases = Dict(aliases)
104 kernel_manager_class = KernelManager
105 kernel_manager_class = KernelManager
105 kernel_client_class = BlockingKernelClient
106 kernel_client_class = BlockingKernelClient
106
107
107 kernel_argv = List(Unicode)
108 kernel_argv = List(Unicode)
108 # frontend flags&aliases to be stripped when building kernel_argv
109 # frontend flags&aliases to be stripped when building kernel_argv
109 frontend_flags = Any(app_flags)
110 frontend_flags = Any(app_flags)
110 frontend_aliases = Any(app_aliases)
111 frontend_aliases = Any(app_aliases)
111
112
112 # create requested profiles by default, if they don't exist:
113 # create requested profiles by default, if they don't exist:
113 auto_create = CBool(True)
114 auto_create = CBool(True)
114 # connection info:
115 # connection info:
115
116
116 sshserver = Unicode('', config=True,
117 sshserver = Unicode('', config=True,
117 help="""The SSH server to use to connect to the kernel.""")
118 help="""The SSH server to use to connect to the kernel.""")
118 sshkey = Unicode('', config=True,
119 sshkey = Unicode('', config=True,
119 help="""Path to the ssh key to use for logging in to the ssh server.""")
120 help="""Path to the ssh key to use for logging in to the ssh server.""")
120
121
121 def _connection_file_default(self):
122 def _connection_file_default(self):
122 return 'kernel-%i.json' % os.getpid()
123 return 'kernel-%i.json' % os.getpid()
123
124
124 existing = CUnicode('', config=True,
125 existing = CUnicode('', config=True,
125 help="""Connect to an already running kernel""")
126 help="""Connect to an already running kernel""")
126
127
127 kernel_name = Unicode('python', config=True,
128 kernel_name = Unicode('python', config=True,
128 help="""The name of the default kernel to start.""")
129 help="""The name of the default kernel to start.""")
129
130
130 confirm_exit = CBool(True, config=True,
131 confirm_exit = CBool(True, config=True,
131 help="""
132 help="""
132 Set to display confirmation dialog on exit. You can always use 'exit' or 'quit',
133 Set to display confirmation dialog on exit. You can always use 'exit' or 'quit',
133 to force a direct exit without any confirmation.""",
134 to force a direct exit without any confirmation.""",
134 )
135 )
135
136
136 def build_kernel_argv(self, argv=None):
137 def build_kernel_argv(self, argv=None):
137 """build argv to be passed to kernel subprocess
138 """build argv to be passed to kernel subprocess
138
139
139 Override in subclasses if any args should be passed to the kernel
140 Override in subclasses if any args should be passed to the kernel
140 """
141 """
141 self.kernel_argv = self.extra_args
142 self.kernel_argv = self.extra_args
142
143
143 def init_connection_file(self):
144 def init_connection_file(self):
144 """find the connection file, and load the info if found.
145 """find the connection file, and load the info if found.
145
146
146 The current working directory and the current profile's security
147 The current working directory and the current profile's security
147 directory will be searched for the file if it is not given by
148 directory will be searched for the file if it is not given by
148 absolute path.
149 absolute path.
149
150
150 When attempting to connect to an existing kernel and the `--existing`
151 When attempting to connect to an existing kernel and the `--existing`
151 argument does not match an existing file, it will be interpreted as a
152 argument does not match an existing file, it will be interpreted as a
152 fileglob, and the matching file in the current profile's security dir
153 fileglob, and the matching file in the current profile's security dir
153 with the latest access time will be used.
154 with the latest access time will be used.
154
155
155 After this method is called, self.connection_file contains the *full path*
156 After this method is called, self.connection_file contains the *full path*
156 to the connection file, never just its name.
157 to the connection file, never just its name.
157 """
158 """
158 if self.existing:
159 if self.existing:
159 try:
160 try:
160 cf = find_connection_file(self.existing)
161 cf = find_connection_file(self.existing)
161 except Exception:
162 except Exception:
162 self.log.critical("Could not find existing kernel connection file %s", self.existing)
163 self.log.critical("Could not find existing kernel connection file %s", self.existing)
163 self.exit(1)
164 self.exit(1)
164 self.log.debug("Connecting to existing kernel: %s" % cf)
165 self.log.debug("Connecting to existing kernel: %s" % cf)
165 self.connection_file = cf
166 self.connection_file = cf
166 else:
167 else:
167 # not existing, check if we are going to write the file
168 # not existing, check if we are going to write the file
168 # and ensure that self.connection_file is a full path, not just the shortname
169 # and ensure that self.connection_file is a full path, not just the shortname
169 try:
170 try:
170 cf = find_connection_file(self.connection_file)
171 cf = find_connection_file(self.connection_file)
171 except Exception:
172 except Exception:
172 # file might not exist
173 # file might not exist
173 if self.connection_file == os.path.basename(self.connection_file):
174 if self.connection_file == os.path.basename(self.connection_file):
174 # just shortname, put it in security dir
175 # just shortname, put it in security dir
175 cf = os.path.join(self.profile_dir.security_dir, self.connection_file)
176 cf = os.path.join(self.profile_dir.security_dir, self.connection_file)
176 else:
177 else:
177 cf = self.connection_file
178 cf = self.connection_file
178 self.connection_file = cf
179 self.connection_file = cf
179 try:
180 try:
180 self.connection_file = filefind(self.connection_file, ['.', self.profile_dir.security_dir])
181 self.connection_file = filefind(self.connection_file, ['.', self.profile_dir.security_dir])
181 except IOError:
182 except IOError:
182 self.log.debug("Connection File not found: %s", self.connection_file)
183 self.log.debug("Connection File not found: %s", self.connection_file)
183 return
184 return
184
185
185 # should load_connection_file only be used for existing?
186 # should load_connection_file only be used for existing?
186 # as it is now, this allows reusing ports if an existing
187 # as it is now, this allows reusing ports if an existing
187 # file is requested
188 # file is requested
188 try:
189 try:
189 self.load_connection_file()
190 self.load_connection_file()
190 except Exception:
191 except Exception:
191 self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
192 self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
192 self.exit(1)
193 self.exit(1)
193
194
194 def init_ssh(self):
195 def init_ssh(self):
195 """set up ssh tunnels, if needed."""
196 """set up ssh tunnels, if needed."""
196 if not self.existing or (not self.sshserver and not self.sshkey):
197 if not self.existing or (not self.sshserver and not self.sshkey):
197 return
198 return
198 self.load_connection_file()
199 self.load_connection_file()
199
200
200 transport = self.transport
201 transport = self.transport
201 ip = self.ip
202 ip = self.ip
202
203
203 if transport != 'tcp':
204 if transport != 'tcp':
204 self.log.error("Can only use ssh tunnels with TCP sockets, not %s", transport)
205 self.log.error("Can only use ssh tunnels with TCP sockets, not %s", transport)
205 sys.exit(-1)
206 sys.exit(-1)
206
207
207 if self.sshkey and not self.sshserver:
208 if self.sshkey and not self.sshserver:
208 # specifying just the key implies that we are connecting directly
209 # specifying just the key implies that we are connecting directly
209 self.sshserver = ip
210 self.sshserver = ip
210 ip = localhost()
211 ip = localhost()
211
212
212 # build connection dict for tunnels:
213 # build connection dict for tunnels:
213 info = dict(ip=ip,
214 info = dict(ip=ip,
214 shell_port=self.shell_port,
215 shell_port=self.shell_port,
215 iopub_port=self.iopub_port,
216 iopub_port=self.iopub_port,
216 stdin_port=self.stdin_port,
217 stdin_port=self.stdin_port,
217 hb_port=self.hb_port
218 hb_port=self.hb_port
218 )
219 )
219
220
220 self.log.info("Forwarding connections to %s via %s"%(ip, self.sshserver))
221 self.log.info("Forwarding connections to %s via %s"%(ip, self.sshserver))
221
222
222 # tunnels return a new set of ports, which will be on localhost:
223 # tunnels return a new set of ports, which will be on localhost:
223 self.ip = localhost()
224 self.ip = localhost()
224 try:
225 try:
225 newports = tunnel_to_kernel(info, self.sshserver, self.sshkey)
226 newports = tunnel_to_kernel(info, self.sshserver, self.sshkey)
226 except:
227 except:
227 # even catch KeyboardInterrupt
228 # even catch KeyboardInterrupt
228 self.log.error("Could not set up tunnels", exc_info=True)
229 self.log.error("Could not set up tunnels", exc_info=True)
229 self.exit(1)
230 self.exit(1)
230
231
231 self.shell_port, self.iopub_port, self.stdin_port, self.hb_port = newports
232 self.shell_port, self.iopub_port, self.stdin_port, self.hb_port = newports
232
233
233 cf = self.connection_file
234 cf = self.connection_file
234 base,ext = os.path.splitext(cf)
235 base,ext = os.path.splitext(cf)
235 base = os.path.basename(base)
236 base = os.path.basename(base)
236 self.connection_file = os.path.basename(base)+'-ssh'+ext
237 self.connection_file = os.path.basename(base)+'-ssh'+ext
237 self.log.info("To connect another client via this tunnel, use:")
238 self.log.info("To connect another client via this tunnel, use:")
238 self.log.info("--existing %s" % self.connection_file)
239 self.log.info("--existing %s" % self.connection_file)
239
240
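`tunnel_to_kernel` (imported at the top of this module) does the actual forwarding here: given the remote connection info and an SSH server, it returns replacement port numbers that are tunneled over localhost. A hedged sketch with made-up hosts and ports:

```python
from IPython.kernel import tunnel_to_kernel

# Connection info as read from the remote kernel's connection file (example values).
info = dict(ip='10.0.0.5', shell_port=53001, iopub_port=53002,
            stdin_port=53003, hb_port=53004)

# Forward the four channels through an SSH server; returns new local ports.
shell, iopub, stdin, hb = tunnel_to_kernel(info, 'user@gateway.example.com',
                                           sshkey='~/.ssh/id_rsa')
print(shell, iopub, stdin, hb)  # four freshly allocated localhost ports
```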
240 def _new_connection_file(self):
241 def _new_connection_file(self):
241 cf = ''
242 cf = ''
242 while not cf:
243 while not cf:
243 # we don't need a 128b id to distinguish kernels, use more readable
244 # we don't need a 128b id to distinguish kernels, use more readable
244 # 48b node segment (12 hex chars). Users running more than 32k simultaneous
245 # 48b node segment (12 hex chars). Users running more than 32k simultaneous
245 # kernels can subclass.
246 # kernels can subclass.
246 ident = str(uuid.uuid4()).split('-')[-1]
247 ident = str(uuid.uuid4()).split('-')[-1]
247 cf = os.path.join(self.profile_dir.security_dir, 'kernel-%s.json' % ident)
248 cf = os.path.join(self.profile_dir.security_dir, 'kernel-%s.json' % ident)
248 # only keep if it's actually new. Protect against unlikely collision
249 # only keep if it's actually new. Protect against unlikely collision
249 # in 48b random search space
250 # in 48b random search space
250 cf = cf if not os.path.exists(cf) else ''
251 cf = cf if not os.path.exists(cf) else ''
251 return cf
252 return cf
252
253
253 def init_kernel_manager(self):
254 def init_kernel_manager(self):
254 # Don't let Qt or ZMQ swallow KeyboardInterrupts.
255 # Don't let Qt or ZMQ swallow KeyboardInterrupts.
255 if self.existing:
256 if self.existing:
256 self.kernel_manager = None
257 self.kernel_manager = None
257 return
258 return
258 signal.signal(signal.SIGINT, signal.SIG_DFL)
259 signal.signal(signal.SIGINT, signal.SIG_DFL)
259
260
260 # Create a KernelManager and start a kernel.
261 # Create a KernelManager and start a kernel.
261 try:
262 try:
262 self.kernel_manager = self.kernel_manager_class(
263 self.kernel_manager = self.kernel_manager_class(
263 ip=self.ip,
264 ip=self.ip,
264 session=self.session,
265 session=self.session,
265 transport=self.transport,
266 transport=self.transport,
266 shell_port=self.shell_port,
267 shell_port=self.shell_port,
267 iopub_port=self.iopub_port,
268 iopub_port=self.iopub_port,
268 stdin_port=self.stdin_port,
269 stdin_port=self.stdin_port,
269 hb_port=self.hb_port,
270 hb_port=self.hb_port,
270 connection_file=self.connection_file,
271 connection_file=self.connection_file,
271 kernel_name=self.kernel_name,
272 kernel_name=self.kernel_name,
272 parent=self,
273 parent=self,
273 ipython_dir=self.ipython_dir,
274 ipython_dir=self.ipython_dir,
274 )
275 )
275 except NoSuchKernel:
276 except NoSuchKernel:
276 self.log.critical("Could not find kernel %s", self.kernel_name)
277 self.log.critical("Could not find kernel %s", self.kernel_name)
277 self.exit(1)
278 self.exit(1)
278
279
279 self.kernel_manager.client_factory = self.kernel_client_class
280 self.kernel_manager.client_factory = self.kernel_client_class
280 # FIXME: remove special treatment of IPython kernels
281 # FIXME: remove special treatment of IPython kernels
281 kwargs = {}
282 kwargs = {}
282 if self.kernel_manager.ipython_kernel:
283 if self.kernel_manager.ipython_kernel:
283 kwargs['extra_arguments'] = self.kernel_argv
284 kwargs['extra_arguments'] = self.kernel_argv
284 self.kernel_manager.start_kernel(**kwargs)
285 self.kernel_manager.start_kernel(**kwargs)
285 atexit.register(self.kernel_manager.cleanup_ipc_files)
286 atexit.register(self.kernel_manager.cleanup_ipc_files)
286
287
287 if self.sshserver:
288 if self.sshserver:
288 # ssh, write new connection file
289 # ssh, write new connection file
289 self.kernel_manager.write_connection_file()
290 self.kernel_manager.write_connection_file()
290
291
291 # in case KM defaults / ssh writing changes things:
292 # in case KM defaults / ssh writing changes things:
292 km = self.kernel_manager
293 km = self.kernel_manager
293 self.shell_port=km.shell_port
294 self.shell_port=km.shell_port
294 self.iopub_port=km.iopub_port
295 self.iopub_port=km.iopub_port
295 self.stdin_port=km.stdin_port
296 self.stdin_port=km.stdin_port
296 self.hb_port=km.hb_port
297 self.hb_port=km.hb_port
297 self.connection_file = km.connection_file
298 self.connection_file = km.connection_file
298
299
299 atexit.register(self.kernel_manager.cleanup_connection_file)
300 atexit.register(self.kernel_manager.cleanup_connection_file)
300
301
301 def init_kernel_client(self):
302 def init_kernel_client(self):
302 if self.kernel_manager is not None:
303 if self.kernel_manager is not None:
303 self.kernel_client = self.kernel_manager.client()
304 self.kernel_client = self.kernel_manager.client()
304 else:
305 else:
305 self.kernel_client = self.kernel_client_class(
306 self.kernel_client = self.kernel_client_class(
306 session=self.session,
307 session=self.session,
307 ip=self.ip,
308 ip=self.ip,
308 transport=self.transport,
309 transport=self.transport,
309 shell_port=self.shell_port,
310 shell_port=self.shell_port,
310 iopub_port=self.iopub_port,
311 iopub_port=self.iopub_port,
311 stdin_port=self.stdin_port,
312 stdin_port=self.stdin_port,
312 hb_port=self.hb_port,
313 hb_port=self.hb_port,
313 connection_file=self.connection_file,
314 connection_file=self.connection_file,
314 parent=self,
315 parent=self,
315 )
316 )
316
317
317 self.kernel_client.start_channels()
318 self.kernel_client.start_channels()
318
319
319
320
320
321
321 def initialize(self, argv=None):
322 def initialize(self, argv=None):
322 """
323 """
323 Classes which mix this class in should call:
324 Classes which mix this class in should call:
324 IPythonConsoleApp.initialize(self,argv)
325 IPythonConsoleApp.initialize(self,argv)
325 """
326 """
326 self.init_connection_file()
327 self.init_connection_file()
327 self.init_ssh()
328 self.init_ssh()
328 self.init_kernel_manager()
329 self.init_kernel_manager()
329 self.init_kernel_client()
330 self.init_kernel_client()
330
331
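The `--existing` handling above leans on `find_connection_file`: when the argument is not an existing path it is treated as a fileglob, and the most recently accessed match in the profile's security directory wins. A small hedged sketch of that lookup, using the default `kernel*.json` pattern from `app_flags`:

```python
from IPython.kernel import find_connection_file

# Resolve the most recent kernel-*.json, as `--existing` with no argument does.
try:
    cf = find_connection_file('kernel*.json')
    print("would connect to:", cf)
except IOError:
    print("no running kernels found")
```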
@@ -1,67 +1,29 @@
1 """IPython kernels and associated utilities
2
3 For connecting to kernels, use jupyter_client
4 """
1 """
5
2 Shim to maintain backwards compatibility with old IPython.kernel imports.
3 """
6 # Copyright (c) IPython Development Team.
4 # Copyright (c) IPython Development Team.
7 # Distributed under the terms of the Modified BSD License.
5 # Distributed under the terms of the Modified BSD License.
8
6
9 # Shim to maintain backwards compatibility with old IPython.kernel imports.
10
11 import sys
7 import sys
12 from warnings import warn
8 from warnings import warn
13
9
14 warn("The `IPython.kernel` package has been deprecated. "
10 warn("The `IPython.kernel` package has been deprecated. "
15 "You should import from ipython_kernel or jupyter_client instead.")
11 "You should import from ipython_kernel or jupyter_client instead.")
16
12
17 from IPython.utils.shimmodule import ShimModule
18
19 # Shims for jupyter_client
20 # Can't do a single shim, because the package didn't move all together
21
13
22 for name in (
14 from IPython.utils.shimmodule import ShimModule
23 'adapter',
24 'blocking',
25 'channels',
26 'channelsabc',
27 'client',
28 'clientabc',
29 'connect',
30 'ioloop',
31 'kernelspec',
32 'kernelspecapp',
33 'launcher',
34 'manager',
35 'managerabc',
36 'multikernelmanager',
37 'restarter',
38 'threaded',
39 'tests.test_adapter',
40 'tests.test_connect',
41 'tests.test_kernelmanager',
42 'tests.test_kernelspec',
43 'tests.test_launcher',
44 'tests.test_multikernelmanager',
45 'tests.test_public_api',
46 ):
47 sys.modules['IPython.kernel.%s' % name] = \
48 ShimModule(name, mirror='jupyter_client.%s' % name)
49
15
50 # some files moved out of the zmq prefix
16 # the session module moved out of the zmq subpackage to the top level of jupyter_client
51 for name in (
17 sys.modules['IPython.kernel.zmq.session'] = ShimModule('session', mirror='jupyter_client.session')
52 'session',
53 'tests.test_session',
54 ):
55 sys.modules['IPython.kernel.zmq.%s' % name] = \
56 ShimModule(name, mirror='jupyter_client.%s' % name)
57 # preserve top-level API modules, all from jupyter_client
58
18
59 # just for friendlier zmq version check
19 for pkg in ('comm', 'inprocess', 'resources', 'zmq'):
60 from . import zmq
20 sys.modules['IPython.kernel.%s' % pkg] = ShimModule(pkg, mirror='ipython_kernel.%s' % pkg)
21 for pkg in ('ioloop', 'blocking'):
22 sys.modules['IPython.kernel.%s' % pkg] = ShimModule(pkg, mirror='jupyter_client.%s' % pkg)
61
23
62 from jupyter_client.connect import *
24 # required for `from IPython.kernel import PKG`
63 from jupyter_client.launcher import *
25 from ipython_kernel import comm, inprocess, resources, zmq
64 from jupyter_client.client import KernelClient
26 from jupyter_client import ioloop, blocking
65 from jupyter_client.manager import KernelManager, run_kernel
27 # public API
66 from jupyter_client.blocking import BlockingKernelClient
28 from ipython_kernel.connect import *
67 from jupyter_client.multikernelmanager import MultiKernelManager
29 from jupyter_client import *
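A hedged sketch of what the shim buys you: old `IPython.kernel` import paths keep working (with the deprecation warning above on first import) but resolve to the new packages. This assumes ipython_kernel and jupyter_client are installed alongside the shim:

```python
# Emits the deprecation warning on first import of IPython.kernel.
from IPython.kernel import KernelManager             # re-exported from jupyter_client
from IPython.kernel.zmq.session import Session       # shimmed to jupyter_client.session

print(KernelManager.__module__)   # a jupyter_client module
print(Session.__module__)         # jupyter_client.session
```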
@@ -1,518 +1,520 @@
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2 """IPython Test Suite Runner.
2 """IPython Test Suite Runner.
3
3
4 This module provides a main entry point to a user script to test IPython
4 This module provides a main entry point to a user script to test IPython
5 itself from the command line. There are two ways of running this script:
5 itself from the command line. There are two ways of running this script:
6
6
7 1. With the syntax `iptest all`. This runs our entire test suite by
7 1. With the syntax `iptest all`. This runs our entire test suite by
8 calling this script (with different arguments) recursively. This
8 calling this script (with different arguments) recursively. This
9 causes modules and package to be tested in different processes, using nose
9 causes modules and package to be tested in different processes, using nose
10 or trial where appropriate.
10 or trial where appropriate.
11 2. With the regular nose syntax, like `iptest -vvs IPython`. In this form
11 2. With the regular nose syntax, like `iptest -vvs IPython`. In this form
12 the script simply calls nose, but with special command line flags and
12 the script simply calls nose, but with special command line flags and
13 plugins loaded.
13 plugins loaded.
14
14
15 """
15 """
16
16
17 # Copyright (c) IPython Development Team.
17 # Copyright (c) IPython Development Team.
18 # Distributed under the terms of the Modified BSD License.
18 # Distributed under the terms of the Modified BSD License.
19
19
20 from __future__ import print_function
20 from __future__ import print_function
21
21
22 import glob
22 import glob
23 from io import BytesIO
23 from io import BytesIO
24 import os
24 import os
25 import os.path as path
25 import os.path as path
26 import sys
26 import sys
27 from threading import Thread, Lock, Event
27 from threading import Thread, Lock, Event
28 import warnings
28 import warnings
29
29
30 import nose.plugins.builtin
30 import nose.plugins.builtin
31 from nose.plugins.xunit import Xunit
31 from nose.plugins.xunit import Xunit
32 from nose import SkipTest
32 from nose import SkipTest
33 from nose.core import TestProgram
33 from nose.core import TestProgram
34 from nose.plugins import Plugin
34 from nose.plugins import Plugin
35 from nose.util import safe_str
35 from nose.util import safe_str
36
36
37 from IPython.utils.process import is_cmd_found
37 from IPython.utils.process import is_cmd_found
38 from IPython.utils.py3compat import bytes_to_str
38 from IPython.utils.py3compat import bytes_to_str
39 from IPython.utils.importstring import import_item
39 from IPython.utils.importstring import import_item
40 from IPython.testing.plugin.ipdoctest import IPythonDoctest
40 from IPython.testing.plugin.ipdoctest import IPythonDoctest
41 from IPython.external.decorators import KnownFailure, knownfailureif
41 from IPython.external.decorators import KnownFailure, knownfailureif
42
42
43 pjoin = path.join
43 pjoin = path.join
44
44
45 #-----------------------------------------------------------------------------
45 #-----------------------------------------------------------------------------
46 # Warnings control
46 # Warnings control
47 #-----------------------------------------------------------------------------
47 #-----------------------------------------------------------------------------
48
48
49 # Twisted generates annoying warnings with Python 2.6, as does other code
49 # Twisted generates annoying warnings with Python 2.6, as does other code
50 # that imports 'sets' as of today
50 # that imports 'sets' as of today
51 warnings.filterwarnings('ignore', 'the sets module is deprecated',
51 warnings.filterwarnings('ignore', 'the sets module is deprecated',
52 DeprecationWarning )
52 DeprecationWarning )
53
53
54 # This one also comes from Twisted
54 # This one also comes from Twisted
55 warnings.filterwarnings('ignore', 'the sha module is deprecated',
55 warnings.filterwarnings('ignore', 'the sha module is deprecated',
56 DeprecationWarning)
56 DeprecationWarning)
57
57
58 # Wx on Fedora11 spits these out
58 # Wx on Fedora11 spits these out
59 warnings.filterwarnings('ignore', 'wxPython/wxWidgets release number mismatch',
59 warnings.filterwarnings('ignore', 'wxPython/wxWidgets release number mismatch',
60 UserWarning)
60 UserWarning)
61
61
62 # ------------------------------------------------------------------------------
62 # ------------------------------------------------------------------------------
63 # Monkeypatch Xunit to count known failures as skipped.
63 # Monkeypatch Xunit to count known failures as skipped.
64 # ------------------------------------------------------------------------------
64 # ------------------------------------------------------------------------------
65 def monkeypatch_xunit():
65 def monkeypatch_xunit():
66 try:
66 try:
67 knownfailureif(True)(lambda: None)()
67 knownfailureif(True)(lambda: None)()
68 except Exception as e:
68 except Exception as e:
69 KnownFailureTest = type(e)
69 KnownFailureTest = type(e)
70
70
71 def addError(self, test, err, capt=None):
71 def addError(self, test, err, capt=None):
72 if issubclass(err[0], KnownFailureTest):
72 if issubclass(err[0], KnownFailureTest):
73 err = (SkipTest,) + err[1:]
73 err = (SkipTest,) + err[1:]
74 return self.orig_addError(test, err, capt)
74 return self.orig_addError(test, err, capt)
75
75
76 Xunit.orig_addError = Xunit.addError
76 Xunit.orig_addError = Xunit.addError
77 Xunit.addError = addError
77 Xunit.addError = addError
78
78
79 #-----------------------------------------------------------------------------
79 #-----------------------------------------------------------------------------
80 # Check which dependencies are installed and greater than minimum version.
80 # Check which dependencies are installed and greater than minimum version.
81 #-----------------------------------------------------------------------------
81 #-----------------------------------------------------------------------------
82 def extract_version(mod):
82 def extract_version(mod):
83 return mod.__version__
83 return mod.__version__
84
84
85 def test_for(item, min_version=None, callback=extract_version):
85 def test_for(item, min_version=None, callback=extract_version):
86 """Test to see if item is importable, and optionally check against a minimum
86 """Test to see if item is importable, and optionally check against a minimum
87 version.
87 version.
88
88
89 If min_version is given, the default behavior is to check against the
89 If min_version is given, the default behavior is to check against the
90 `__version__` attribute of the item, but specifying `callback` allows you to
90 `__version__` attribute of the item, but specifying `callback` allows you to
91 extract the value you are interested in. e.g::
91 extract the value you are interested in. e.g::
92
92
93 In [1]: import sys
93 In [1]: import sys
94
94
95 In [2]: from IPython.testing.iptest import test_for
95 In [2]: from IPython.testing.iptest import test_for
96
96
97 In [3]: test_for('sys', (2,6), callback=lambda sys: sys.version_info)
97 In [3]: test_for('sys', (2,6), callback=lambda sys: sys.version_info)
98 Out[3]: True
98 Out[3]: True
99
99
100 """
100 """
101 try:
101 try:
102 check = import_item(item)
102 check = import_item(item)
103 except (ImportError, RuntimeError):
103 except (ImportError, RuntimeError):
104 # GTK reports Runtime error if it can't be initialized even if it's
104 # GTK reports Runtime error if it can't be initialized even if it's
105 # importable.
105 # importable.
106 return False
106 return False
107 else:
107 else:
108 if min_version:
108 if min_version:
109 if callback:
109 if callback:
110 # extra processing step to get version to compare
110 # extra processing step to get version to compare
111 check = callback(check)
111 check = callback(check)
112
112
113 return check >= min_version
113 return check >= min_version
114 else:
114 else:
115 return True
115 return True
116
116
117 # Global dict where we can store information on what we have and what we don't
117 # Global dict where we can store information on what we have and what we don't
118 # have available at test run time
118 # have available at test run time
119 have = {}
119 have = {}
120
120
121 have['curses'] = test_for('_curses')
121 have['curses'] = test_for('_curses')
122 have['matplotlib'] = test_for('matplotlib')
122 have['matplotlib'] = test_for('matplotlib')
123 have['numpy'] = test_for('numpy')
123 have['numpy'] = test_for('numpy')
124 have['pexpect'] = test_for('pexpect')
124 have['pexpect'] = test_for('pexpect')
125 have['pymongo'] = test_for('pymongo')
125 have['pymongo'] = test_for('pymongo')
126 have['pygments'] = test_for('pygments')
126 have['pygments'] = test_for('pygments')
127 have['qt'] = test_for('IPython.external.qt')
127 have['qt'] = test_for('IPython.external.qt')
128 have['sqlite3'] = test_for('sqlite3')
128 have['sqlite3'] = test_for('sqlite3')
129 have['tornado'] = test_for('tornado.version_info', (4,0), callback=None)
129 have['tornado'] = test_for('tornado.version_info', (4,0), callback=None)
130 have['jinja2'] = test_for('jinja2')
130 have['jinja2'] = test_for('jinja2')
131 have['mistune'] = test_for('mistune')
131 have['mistune'] = test_for('mistune')
132 have['requests'] = test_for('requests')
132 have['requests'] = test_for('requests')
133 have['sphinx'] = test_for('sphinx')
133 have['sphinx'] = test_for('sphinx')
134 have['jsonschema'] = test_for('jsonschema')
134 have['jsonschema'] = test_for('jsonschema')
135 have['terminado'] = test_for('terminado')
135 have['terminado'] = test_for('terminado')
136 have['casperjs'] = is_cmd_found('casperjs')
136 have['casperjs'] = is_cmd_found('casperjs')
137 have['phantomjs'] = is_cmd_found('phantomjs')
137 have['phantomjs'] = is_cmd_found('phantomjs')
138 have['slimerjs'] = is_cmd_found('slimerjs')
138 have['slimerjs'] = is_cmd_found('slimerjs')
139
139
140 min_zmq = (13,)
140 min_zmq = (13,)
141
141
142 have['zmq'] = test_for('zmq.pyzmq_version_info', min_zmq, callback=lambda x: x())
142 have['zmq'] = test_for('zmq.pyzmq_version_info', min_zmq, callback=lambda x: x())
143
143
144 #-----------------------------------------------------------------------------
144 #-----------------------------------------------------------------------------
145 # Test suite definitions
145 # Test suite definitions
146 #-----------------------------------------------------------------------------
146 #-----------------------------------------------------------------------------
147
147
148 test_group_names = ['parallel', 'kernel', 'kernel.inprocess', 'config', 'core',
148 test_group_names = ['parallel', 'kernel', 'kernel.inprocess', 'config', 'core',
149 'extensions', 'lib', 'terminal', 'testing', 'utils',
149 'extensions', 'lib', 'terminal', 'testing', 'utils',
150 'nbformat', 'qt', 'html', 'nbconvert'
150 'nbformat', 'qt', 'html', 'nbconvert'
151 ]
151 ]
152
152
153 class TestSection(object):
153 class TestSection(object):
154 def __init__(self, name, includes):
154 def __init__(self, name, includes):
155 self.name = name
155 self.name = name
156 self.includes = includes
156 self.includes = includes
157 self.excludes = []
157 self.excludes = []
158 self.dependencies = []
158 self.dependencies = []
159 self.enabled = True
159 self.enabled = True
160
160
161 def exclude(self, module):
161 def exclude(self, module):
162 if not module.startswith('IPython'):
162 if not module.startswith('IPython'):
163 module = self.includes[0] + "." + module
163 module = self.includes[0] + "." + module
164 self.excludes.append(module.replace('.', os.sep))
164 self.excludes.append(module.replace('.', os.sep))
165
165
166 def requires(self, *packages):
166 def requires(self, *packages):
167 self.dependencies.extend(packages)
167 self.dependencies.extend(packages)
168
168
169 @property
169 @property
170 def will_run(self):
170 def will_run(self):
171 return self.enabled and all(have[p] for p in self.dependencies)
171 return self.enabled and all(have[p] for p in self.dependencies)
172
172
173 shims = {
173 shims = {
174 'parallel': 'ipython_parallel',
174 'parallel': 'ipython_parallel',
175 'kernel': 'ipython_kernel',
176 'kernel.inprocess': 'ipython_kernel.inprocess',
175 }
177 }
176
178
177 # Name -> (include, exclude, dependencies_met)
179 # Name -> (include, exclude, dependencies_met)
178 test_sections = {n:TestSection(n, [shims.get(n, 'IPython.%s' % n)]) for n in test_group_names}
180 test_sections = {n:TestSection(n, [shims.get(n, 'IPython.%s' % n)]) for n in test_group_names}
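The widened `shims` mapping only changes which package name each test group resolves to; a tiny sketch of the lookup used in the comprehension above:

```python
shims = {
    'parallel': 'ipython_parallel',
    'kernel': 'ipython_kernel',
    'kernel.inprocess': 'ipython_kernel.inprocess',
}

for name in ('core', 'kernel', 'kernel.inprocess'):
    print(name, '->', shims.get(name, 'IPython.%s' % name))
# core -> IPython.core
# kernel -> ipython_kernel
# kernel.inprocess -> ipython_kernel.inprocess
```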
179
181
180
182
181 # Exclusions and dependencies
183 # Exclusions and dependencies
182 # ---------------------------
184 # ---------------------------
183
185
184 # core:
186 # core:
185 sec = test_sections['core']
187 sec = test_sections['core']
186 if not have['sqlite3']:
188 if not have['sqlite3']:
187 sec.exclude('tests.test_history')
189 sec.exclude('tests.test_history')
188 sec.exclude('history')
190 sec.exclude('history')
189 if not have['matplotlib']:
191 if not have['matplotlib']:
190 sec.exclude('pylabtools')
192 sec.exclude('pylabtools')
191 sec.exclude('tests.test_pylabtools')
193 sec.exclude('tests.test_pylabtools')
192
194
193 # lib:
195 # lib:
194 sec = test_sections['lib']
196 sec = test_sections['lib']
195 if not have['zmq']:
197 if not have['zmq']:
196 sec.exclude('kernel')
198 sec.exclude('kernel')
197 # We do this unconditionally, so that the test suite doesn't import
199 # We do this unconditionally, so that the test suite doesn't import
198 # gtk, changing the default encoding and masking some unicode bugs.
200 # gtk, changing the default encoding and masking some unicode bugs.
199 sec.exclude('inputhookgtk')
201 sec.exclude('inputhookgtk')
200 # We also do this unconditionally, because wx can interfere with Unix signals.
202 # We also do this unconditionally, because wx can interfere with Unix signals.
201 # There are currently no tests for it anyway.
203 # There are currently no tests for it anyway.
202 sec.exclude('inputhookwx')
204 sec.exclude('inputhookwx')
203 # Testing inputhook will need a lot of thought, to figure out
205 # Testing inputhook will need a lot of thought, to figure out
204 # how to have tests that don't lock up with the gui event
206 # how to have tests that don't lock up with the gui event
205 # loops in the picture
207 # loops in the picture
206 sec.exclude('inputhook')
208 sec.exclude('inputhook')
207
209
208 # testing:
210 # testing:
209 sec = test_sections['testing']
211 sec = test_sections['testing']
210 # These have to be skipped on win32 because they use echo, rm, cd, etc.
212 # These have to be skipped on win32 because they use echo, rm, cd, etc.
211 # See ticket https://github.com/ipython/ipython/issues/87
213 # See ticket https://github.com/ipython/ipython/issues/87
212 if sys.platform == 'win32':
214 if sys.platform == 'win32':
213 sec.exclude('plugin.test_exampleip')
215 sec.exclude('plugin.test_exampleip')
214 sec.exclude('plugin.dtexample')
216 sec.exclude('plugin.dtexample')
215
217
216 # terminal:
218 # terminal:
217 if (not have['pexpect']) or (not have['zmq']):
219 if (not have['pexpect']) or (not have['zmq']):
218 test_sections['terminal'].exclude('console')
220 test_sections['terminal'].exclude('console')
219
221
220 # parallel
222 # parallel
221 sec = test_sections['parallel']
223 sec = test_sections['parallel']
222 sec.requires('zmq')
224 sec.requires('zmq')
223 if not have['pymongo']:
225 if not have['pymongo']:
224 sec.exclude('controller.mongodb')
226 sec.exclude('controller.mongodb')
225 sec.exclude('tests.test_mongodb')
227 sec.exclude('tests.test_mongodb')
226
228
227 # kernel:
229 # kernel:
228 sec = test_sections['kernel']
230 sec = test_sections['kernel']
229 sec.requires('zmq')
231 sec.requires('zmq')
230 # The in-process kernel tests are done in a separate section
232 # The in-process kernel tests are done in a separate section
231 sec.exclude('inprocess')
233 sec.exclude('inprocess')
232 # importing gtk sets the default encoding, which we want to avoid
234 # importing gtk sets the default encoding, which we want to avoid
233 sec.exclude('zmq.gui.gtkembed')
235 sec.exclude('zmq.gui.gtkembed')
234 sec.exclude('zmq.gui.gtk3embed')
236 sec.exclude('zmq.gui.gtk3embed')
235 if not have['matplotlib']:
237 if not have['matplotlib']:
236 sec.exclude('zmq.pylab')
238 sec.exclude('zmq.pylab')
237
239
238 # kernel.inprocess:
240 # kernel.inprocess:
239 test_sections['kernel.inprocess'].requires('zmq')
241 test_sections['kernel.inprocess'].requires('zmq')
240
242
241 # extensions:
243 # extensions:
242 sec = test_sections['extensions']
244 sec = test_sections['extensions']
243 # This is deprecated in favour of rpy2
245 # This is deprecated in favour of rpy2
244 sec.exclude('rmagic')
246 sec.exclude('rmagic')
245 # autoreload does some strange stuff, so move it to its own test section
247 # autoreload does some strange stuff, so move it to its own test section
246 sec.exclude('autoreload')
248 sec.exclude('autoreload')
247 sec.exclude('tests.test_autoreload')
249 sec.exclude('tests.test_autoreload')
248 test_sections['autoreload'] = TestSection('autoreload',
250 test_sections['autoreload'] = TestSection('autoreload',
249 ['IPython.extensions.autoreload', 'IPython.extensions.tests.test_autoreload'])
251 ['IPython.extensions.autoreload', 'IPython.extensions.tests.test_autoreload'])
250 test_group_names.append('autoreload')
252 test_group_names.append('autoreload')
251
253
252 # qt:
254 # qt:
253 test_sections['qt'].requires('zmq', 'qt', 'pygments')
255 test_sections['qt'].requires('zmq', 'qt', 'pygments')
254
256
255 # html:
257 # html:
256 sec = test_sections['html']
258 sec = test_sections['html']
257 sec.requires('zmq', 'tornado', 'requests', 'sqlite3', 'jsonschema')
259 sec.requires('zmq', 'tornado', 'requests', 'sqlite3', 'jsonschema')
258 # The notebook 'static' directory contains JS, css and other
260 # The notebook 'static' directory contains JS, css and other
259 # files for web serving. Occasionally projects may put a .py
261 # files for web serving. Occasionally projects may put a .py
260 # file in there (MathJax ships a conf.py), so we might as
262 # file in there (MathJax ships a conf.py), so we might as
261 # well play it safe and skip the whole thing.
263 # well play it safe and skip the whole thing.
262 sec.exclude('static')
264 sec.exclude('static')
263 sec.exclude('tasks')
265 sec.exclude('tasks')
264 if not have['jinja2']:
266 if not have['jinja2']:
265 sec.exclude('notebookapp')
267 sec.exclude('notebookapp')
266 if not have['pygments'] or not have['jinja2']:
268 if not have['pygments'] or not have['jinja2']:
267 sec.exclude('nbconvert')
269 sec.exclude('nbconvert')
268 if not have['terminado']:
270 if not have['terminado']:
269 sec.exclude('terminal')
271 sec.exclude('terminal')
270
272
271 # config:
273 # config:
272 # Config files aren't really importable stand-alone
274 # Config files aren't really importable stand-alone
273 test_sections['config'].exclude('profile')
275 test_sections['config'].exclude('profile')
274
276
275 # nbconvert:
277 # nbconvert:
276 sec = test_sections['nbconvert']
278 sec = test_sections['nbconvert']
277 sec.requires('pygments', 'jinja2', 'jsonschema', 'mistune')
279 sec.requires('pygments', 'jinja2', 'jsonschema', 'mistune')
278 # Exclude nbconvert directories containing config files used to test.
280 # Exclude nbconvert directories containing config files used to test.
279 # Executing the config files with iptest would cause an exception.
281 # Executing the config files with iptest would cause an exception.
280 sec.exclude('tests.files')
282 sec.exclude('tests.files')
281 sec.exclude('exporters.tests.files')
283 sec.exclude('exporters.tests.files')
282 if not have['tornado']:
284 if not have['tornado']:
283 sec.exclude('nbconvert.post_processors.serve')
285 sec.exclude('nbconvert.post_processors.serve')
284 sec.exclude('nbconvert.post_processors.tests.test_serve')
286 sec.exclude('nbconvert.post_processors.tests.test_serve')
285
287
286 # nbformat:
288 # nbformat:
287 test_sections['nbformat'].requires('jsonschema')
289 test_sections['nbformat'].requires('jsonschema')
288
290
289 #-----------------------------------------------------------------------------
291 #-----------------------------------------------------------------------------
290 # Functions and classes
292 # Functions and classes
291 #-----------------------------------------------------------------------------
293 #-----------------------------------------------------------------------------
292
294
293 def check_exclusions_exist():
295 def check_exclusions_exist():
294 from IPython.utils.path import get_ipython_package_dir
296 from IPython.utils.path import get_ipython_package_dir
295 from IPython.utils.warn import warn
297 from IPython.utils.warn import warn
296 parent = os.path.dirname(get_ipython_package_dir())
298 parent = os.path.dirname(get_ipython_package_dir())
297 for sec in test_sections.values():
299 for sec in test_sections.values():
298 for pattern in sec.excludes:
300 for pattern in sec.excludes:
299 fullpath = pjoin(parent, pattern)
301 fullpath = pjoin(parent, pattern)
300 if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):
302 if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):
301 warn("Excluding nonexistent file: %r" % pattern)
303 warn("Excluding nonexistent file: %r" % pattern)
302
304
303
305
304 class ExclusionPlugin(Plugin):
306 class ExclusionPlugin(Plugin):
305 """A nose plugin to effect our exclusions of files and directories.
307 """A nose plugin to effect our exclusions of files and directories.
306 """
308 """
307 name = 'exclusions'
309 name = 'exclusions'
308 score = 3000 # Should come before any other plugins
310 score = 3000 # Should come before any other plugins
309
311
310 def __init__(self, exclude_patterns=None):
312 def __init__(self, exclude_patterns=None):
311 """
313 """
312 Parameters
314 Parameters
313 ----------
315 ----------
314
316
315 exclude_patterns : sequence of strings, optional
317 exclude_patterns : sequence of strings, optional
316 Filenames containing these patterns (as raw strings, not as regular
318 Filenames containing these patterns (as raw strings, not as regular
317 expressions) are excluded from the tests.
319 expressions) are excluded from the tests.
318 """
320 """
319 self.exclude_patterns = exclude_patterns or []
321 self.exclude_patterns = exclude_patterns or []
320 super(ExclusionPlugin, self).__init__()
322 super(ExclusionPlugin, self).__init__()
321
323
322 def options(self, parser, env=os.environ):
324 def options(self, parser, env=os.environ):
323 Plugin.options(self, parser, env)
325 Plugin.options(self, parser, env)
324
326
325 def configure(self, options, config):
327 def configure(self, options, config):
326 Plugin.configure(self, options, config)
328 Plugin.configure(self, options, config)
327 # Override nose trying to disable plugin.
329 # Override nose trying to disable plugin.
328 self.enabled = True
330 self.enabled = True
329
331
330 def wantFile(self, filename):
332 def wantFile(self, filename):
331 """Return whether the given filename should be scanned for tests.
333 """Return whether the given filename should be scanned for tests.
332 """
334 """
333 if any(pat in filename for pat in self.exclude_patterns):
335 if any(pat in filename for pat in self.exclude_patterns):
334 return False
336 return False
335 return None
337 return None
336
338
337 def wantDirectory(self, directory):
339 def wantDirectory(self, directory):
338 """Return whether the given directory should be scanned for tests.
340 """Return whether the given directory should be scanned for tests.
339 """
341 """
340 if any(pat in directory for pat in self.exclude_patterns):
342 if any(pat in directory for pat in self.exclude_patterns):
341 return False
343 return False
342 return None
344 return None
343
345
344
346
345 class StreamCapturer(Thread):
347 class StreamCapturer(Thread):
346 daemon = True # Don't hang if main thread crashes
348 daemon = True # Don't hang if main thread crashes
347 started = False
349 started = False
348 def __init__(self, echo=False):
350 def __init__(self, echo=False):
349 super(StreamCapturer, self).__init__()
351 super(StreamCapturer, self).__init__()
350 self.echo = echo
352 self.echo = echo
351 self.streams = []
353 self.streams = []
352 self.buffer = BytesIO()
354 self.buffer = BytesIO()
353 self.readfd, self.writefd = os.pipe()
355 self.readfd, self.writefd = os.pipe()
354 self.buffer_lock = Lock()
356 self.buffer_lock = Lock()
355 self.stop = Event()
357 self.stop = Event()
356
358
357 def run(self):
359 def run(self):
358 self.started = True
360 self.started = True
359
361
360 while not self.stop.is_set():
362 while not self.stop.is_set():
361 chunk = os.read(self.readfd, 1024)
363 chunk = os.read(self.readfd, 1024)
362
364
363 with self.buffer_lock:
365 with self.buffer_lock:
364 self.buffer.write(chunk)
366 self.buffer.write(chunk)
365 if self.echo:
367 if self.echo:
366 sys.stdout.write(bytes_to_str(chunk))
368 sys.stdout.write(bytes_to_str(chunk))
367
369
368 os.close(self.readfd)
370 os.close(self.readfd)
369 os.close(self.writefd)
371 os.close(self.writefd)
370
372
371 def reset_buffer(self):
373 def reset_buffer(self):
372 with self.buffer_lock:
374 with self.buffer_lock:
373 self.buffer.truncate(0)
375 self.buffer.truncate(0)
374 self.buffer.seek(0)
376 self.buffer.seek(0)
375
377
376 def get_buffer(self):
378 def get_buffer(self):
377 with self.buffer_lock:
379 with self.buffer_lock:
378 return self.buffer.getvalue()
380 return self.buffer.getvalue()
379
381
380 def ensure_started(self):
382 def ensure_started(self):
381 if not self.started:
383 if not self.started:
382 self.start()
384 self.start()
383
385
384 def halt(self):
386 def halt(self):
385 """Safely stop the thread."""
387 """Safely stop the thread."""
386 if not self.started:
388 if not self.started:
387 return
389 return
388
390
389 self.stop.set()
391 self.stop.set()
390 os.write(self.writefd, b'\0') # Ensure we're not locked in a read()
392 os.write(self.writefd, b'\0') # Ensure we're not locked in a read()
391 self.join()
393 self.join()
392
394
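StreamCapturer hands out the write end of an os.pipe() and drains the read end on a daemon thread, so anything a subprocess writes to that descriptor accumulates in the shared buffer. A rough usage sketch, assuming the class is importable from this module and a POSIX `echo` is available (the sleep only gives the capture thread time to drain the pipe):

    import subprocess, time

    cap = StreamCapturer(echo=False)
    cap.ensure_started()
    subprocess.call(['echo', 'hello from a subprocess'], stdout=cap.writefd)
    time.sleep(0.2)             # let the background thread read the pipe
    print(cap.get_buffer())     # b'hello from a subprocess\n'
    cap.halt()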
393 class SubprocessStreamCapturePlugin(Plugin):
395 class SubprocessStreamCapturePlugin(Plugin):
394 name='subprocstreams'
396 name='subprocstreams'
395 def __init__(self):
397 def __init__(self):
396 Plugin.__init__(self)
398 Plugin.__init__(self)
397 self.stream_capturer = StreamCapturer()
399 self.stream_capturer = StreamCapturer()
398 self.destination = os.environ.get('IPTEST_SUBPROC_STREAMS', 'capture')
400 self.destination = os.environ.get('IPTEST_SUBPROC_STREAMS', 'capture')
399 # This is ugly, but distant parts of the test machinery need to be able
401 # This is ugly, but distant parts of the test machinery need to be able
400 # to redirect streams, so we make the object globally accessible.
402 # to redirect streams, so we make the object globally accessible.
401 nose.iptest_stdstreams_fileno = self.get_write_fileno
403 nose.iptest_stdstreams_fileno = self.get_write_fileno
402
404
403 def get_write_fileno(self):
405 def get_write_fileno(self):
404 if self.destination == 'capture':
406 if self.destination == 'capture':
405 self.stream_capturer.ensure_started()
407 self.stream_capturer.ensure_started()
406 return self.stream_capturer.writefd
408 return self.stream_capturer.writefd
407 elif self.destination == 'discard':
409 elif self.destination == 'discard':
408 return os.open(os.devnull, os.O_WRONLY)
410 return os.open(os.devnull, os.O_WRONLY)
409 else:
411 else:
410 return sys.__stdout__.fileno()
412 return sys.__stdout__.fileno()
411
413
412 def configure(self, options, config):
414 def configure(self, options, config):
413 Plugin.configure(self, options, config)
415 Plugin.configure(self, options, config)
414 # Override nose trying to disable plugin.
416 # Override nose trying to disable plugin.
415 if self.destination == 'capture':
417 if self.destination == 'capture':
416 self.enabled = True
418 self.enabled = True
417
419
418 def startTest(self, test):
420 def startTest(self, test):
419 # Reset log capture
421 # Reset log capture
420 self.stream_capturer.reset_buffer()
422 self.stream_capturer.reset_buffer()
421
423
422 def formatFailure(self, test, err):
424 def formatFailure(self, test, err):
423 # Show output
425 # Show output
424 ec, ev, tb = err
426 ec, ev, tb = err
425 captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')
427 captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')
426 if captured.strip():
428 if captured.strip():
427 ev = safe_str(ev)
429 ev = safe_str(ev)
428 out = [ev, '>> begin captured subprocess output <<',
430 out = [ev, '>> begin captured subprocess output <<',
429 captured,
431 captured,
430 '>> end captured subprocess output <<']
432 '>> end captured subprocess output <<']
431 return ec, '\n'.join(out), tb
433 return ec, '\n'.join(out), tb
432
434
433 return err
435 return err
434
436
435 formatError = formatFailure
437 formatError = formatFailure
436
438
437 def finalize(self, result):
439 def finalize(self, result):
438 self.stream_capturer.halt()
440 self.stream_capturer.halt()
439
441
440
442
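As get_write_fileno shows, subprocess output can be captured (the default), discarded, or passed straight through to the real stdout, selected with the IPTEST_SUBPROC_STREAMS environment variable before the suite starts. A sketch of driving that from a parent process, assuming an `iptest` executable is on PATH:

    import os, subprocess

    # 'capture' (default), 'discard', or any other value to pass streams through.
    env = dict(os.environ, IPTEST_SUBPROC_STREAMS='discard')
    subprocess.call(['iptest', 'IPython.core'], env=env)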
441 def run_iptest():
443 def run_iptest():
442 """Run the IPython test suite using nose.
444 """Run the IPython test suite using nose.
443
445
444 This function is called when this script is **not** called with the form
446 This function is called when this script is **not** called with the form
445 `iptest all`. It simply calls nose with appropriate command line flags
447 `iptest all`. It simply calls nose with appropriate command line flags
446 and accepts all of the standard nose arguments.
448 and accepts all of the standard nose arguments.
447 """
449 """
448 # Apply our monkeypatch to Xunit
450 # Apply our monkeypatch to Xunit
449 if '--with-xunit' in sys.argv and not hasattr(Xunit, 'orig_addError'):
451 if '--with-xunit' in sys.argv and not hasattr(Xunit, 'orig_addError'):
450 monkeypatch_xunit()
452 monkeypatch_xunit()
451
453
452 warnings.filterwarnings('ignore',
454 warnings.filterwarnings('ignore',
453 'This will be removed soon. Use IPython.testing.util instead')
455 'This will be removed soon. Use IPython.testing.util instead')
454
456
455 arg1 = sys.argv[1]
457 arg1 = sys.argv[1]
456 if arg1 in test_sections:
458 if arg1 in test_sections:
457 section = test_sections[arg1]
459 section = test_sections[arg1]
458 sys.argv[1:2] = section.includes
460 sys.argv[1:2] = section.includes
459 elif arg1.startswith('IPython.') and arg1[8:] in test_sections:
461 elif arg1.startswith('IPython.') and arg1[8:] in test_sections:
460 section = test_sections[arg1[8:]]
462 section = test_sections[arg1[8:]]
461 sys.argv[1:2] = section.includes
463 sys.argv[1:2] = section.includes
462 else:
464 else:
463 section = TestSection(arg1, includes=[arg1])
465 section = TestSection(arg1, includes=[arg1])
464
466
465
467
466 argv = sys.argv + [ '--detailed-errors', # extra info in tracebacks
468 argv = sys.argv + [ '--detailed-errors', # extra info in tracebacks
467
469
468 '--with-ipdoctest',
470 '--with-ipdoctest',
469 '--ipdoctest-tests','--ipdoctest-extension=txt',
471 '--ipdoctest-tests','--ipdoctest-extension=txt',
470
472
471 # We add --exe because of setuptools' imbecility (it
473 # We add --exe because of setuptools' imbecility (it
472 # blindly does chmod +x on ALL files). Nose does the
474 # blindly does chmod +x on ALL files). Nose does the
473 # right thing and tries to avoid executables;
475 # right thing and tries to avoid executables;
474 # setuptools unfortunately forces our hand here. This
476 # setuptools unfortunately forces our hand here. This
475 # has been discussed on the distutils list and the
477 # has been discussed on the distutils list and the
476 # setuptools devs refuse to fix this problem!
478 # setuptools devs refuse to fix this problem!
477 '--exe',
479 '--exe',
478 ]
480 ]
479 if '-a' not in argv and '-A' not in argv:
481 if '-a' not in argv and '-A' not in argv:
480 argv = argv + ['-a', '!crash']
482 argv = argv + ['-a', '!crash']
481
483
482 if nose.__version__ >= '0.11':
484 if nose.__version__ >= '0.11':
483 # I don't fully understand why we need this one, but depending on what
485 # I don't fully understand why we need this one, but depending on what
484 # directory the test suite is run from, if we don't give it, 0 tests
486 # directory the test suite is run from, if we don't give it, 0 tests
485 # get run. Specifically, if the test suite is run from the source dir
487 # get run. Specifically, if the test suite is run from the source dir
486 # with an argument (like 'iptest.py IPython.core'), 0 tests are run,
488 # with an argument (like 'iptest.py IPython.core'), 0 tests are run,
487 # even if the same call done in this directory works fine). It appears
489 # even if the same call done in this directory works fine). It appears
488 # that if the requested package is in the current dir, nose bails early
490 # that if the requested package is in the current dir, nose bails early
489 # by default. Since it's otherwise harmless, leave it in by default
491 # by default. Since it's otherwise harmless, leave it in by default
490 # for nose >= 0.11, though unfortunately nose 0.10 doesn't support it.
492 # for nose >= 0.11, though unfortunately nose 0.10 doesn't support it.
491 argv.append('--traverse-namespace')
493 argv.append('--traverse-namespace')
492
494
493 # use our plugin for doctesting. It will remove the standard doctest plugin
495 # use our plugin for doctesting. It will remove the standard doctest plugin
494 # if it finds it enabled
496 # if it finds it enabled
495 plugins = [ExclusionPlugin(section.excludes), IPythonDoctest(), KnownFailure(),
497 plugins = [ExclusionPlugin(section.excludes), IPythonDoctest(), KnownFailure(),
496 SubprocessStreamCapturePlugin() ]
498 SubprocessStreamCapturePlugin() ]
497
499
498 # Use working directory set by parent process (see iptestcontroller)
500 # Use working directory set by parent process (see iptestcontroller)
499 if 'IPTEST_WORKING_DIR' in os.environ:
501 if 'IPTEST_WORKING_DIR' in os.environ:
500 os.chdir(os.environ['IPTEST_WORKING_DIR'])
502 os.chdir(os.environ['IPTEST_WORKING_DIR'])
501
503
502 # We need a global ipython running in this process, but the special
504 # We need a global ipython running in this process, but the special
503 # in-process group spawns its own IPython kernels, so for *that* group we
505 # in-process group spawns its own IPython kernels, so for *that* group we
504 # must avoid also opening the global one (otherwise there's a conflict of
506 # must avoid also opening the global one (otherwise there's a conflict of
505 # singletons). Ultimately the solution to this problem is to refactor our
507 # singletons). Ultimately the solution to this problem is to refactor our
506 # assumptions about what needs to be a singleton and what doesn't (app
508 # assumptions about what needs to be a singleton and what doesn't (app
507 # objects should, individual shells shouldn't). But for now, this
509 # objects should, individual shells shouldn't). But for now, this
508 # workaround allows the test suite for the inprocess module to complete.
510 # workaround allows the test suite for the inprocess module to complete.
509 if 'kernel.inprocess' not in section.name:
511 if 'kernel.inprocess' not in section.name:
510 from IPython.testing import globalipapp
512 from IPython.testing import globalipapp
511 globalipapp.start_ipython()
513 globalipapp.start_ipython()
512
514
513 # Now nose can run
515 # Now nose can run
514 TestProgram(argv=argv, addplugins=plugins)
516 TestProgram(argv=argv, addplugins=plugins)
515
517
516 if __name__ == '__main__':
518 if __name__ == '__main__':
517 run_iptest()
519 run_iptest()
518
520
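The argument handling at the top of run_iptest maps a bare section name, a dotted IPython.* section name, or an arbitrary module path onto the includes that nose will run. Condensed, the dispatch amounts to the following (test_sections as defined earlier in this file):

    arg1 = 'IPython.core'                            # e.g. `iptest IPython.core`
    if arg1 in test_sections:                        # bare name such as 'core'
        includes = test_sections[arg1].includes
    elif arg1.startswith('IPython.') and arg1[8:] in test_sections:
        includes = test_sections[arg1[8:]].includes  # strip the 'IPython.' prefix
    else:
        includes = [arg1]                            # treat it as a module path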
@@ -1,226 +1,226 b''
1 """Utilities for launching kernels
1 """Utilities for launching kernels
2 """
2 """
3
3
4 # Copyright (c) IPython Development Team.
4 # Copyright (c) IPython Development Team.
5 # Distributed under the terms of the Modified BSD License.
5 # Distributed under the terms of the Modified BSD License.
6
6
7 import os
7 import os
8 import sys
8 import sys
9 from subprocess import Popen, PIPE
9 from subprocess import Popen, PIPE
10
10
11 from IPython.utils.encoding import getdefaultencoding
11 from IPython.utils.encoding import getdefaultencoding
12 from IPython.utils.py3compat import cast_bytes_py2
12 from IPython.utils.py3compat import cast_bytes_py2
13
13
14
14
15 def swallow_argv(argv, aliases=None, flags=None):
15 def swallow_argv(argv, aliases=None, flags=None):
16 """strip frontend-specific aliases and flags from an argument list
16 """strip frontend-specific aliases and flags from an argument list
17
17
18 For use primarily in frontend apps that want to pass a subset of command-line
18 For use primarily in frontend apps that want to pass a subset of command-line
19 arguments through to a subprocess, where frontend-specific flags and aliases
19 arguments through to a subprocess, where frontend-specific flags and aliases
20 should be removed from the list.
20 should be removed from the list.
21
21
22 Parameters
22 Parameters
23 ----------
23 ----------
24
24
25 argv : list(str)
25 argv : list(str)
26 The starting argv, to be filtered
26 The starting argv, to be filtered
27 aliases : container of aliases (dict, list, set, etc.)
27 aliases : container of aliases (dict, list, set, etc.)
28 The frontend-specific aliases to be removed
28 The frontend-specific aliases to be removed
29 flags : container of flags (dict, list, set, etc.)
29 flags : container of flags (dict, list, set, etc.)
30 The frontend-specific flags to be removed
30 The frontend-specific flags to be removed
31
31
32 Returns
32 Returns
33 -------
33 -------
34
34
35 argv : list(str)
35 argv : list(str)
36 The argv list, excluding flags and aliases that have been stripped
36 The argv list, excluding flags and aliases that have been stripped
37 """
37 """
38
38
39 if aliases is None:
39 if aliases is None:
40 aliases = set()
40 aliases = set()
41 if flags is None:
41 if flags is None:
42 flags = set()
42 flags = set()
43
43
44 stripped = list(argv) # copy
44 stripped = list(argv) # copy
45
45
46 swallow_next = False
46 swallow_next = False
47 was_flag = False
47 was_flag = False
48 for a in argv:
48 for a in argv:
49 if a == '--':
49 if a == '--':
50 break
50 break
51 if swallow_next:
51 if swallow_next:
52 swallow_next = False
52 swallow_next = False
53 # last arg was an alias, remove the next one
53 # last arg was an alias, remove the next one
54 # *unless* the last alias has a no-arg flag version, in which
54 # *unless* the last alias has a no-arg flag version, in which
55 # case, don't swallow the next arg if it's also a flag:
55 # case, don't swallow the next arg if it's also a flag:
56 if not (was_flag and a.startswith('-')):
56 if not (was_flag and a.startswith('-')):
57 stripped.remove(a)
57 stripped.remove(a)
58 continue
58 continue
59 if a.startswith('-'):
59 if a.startswith('-'):
60 split = a.lstrip('-').split('=')
60 split = a.lstrip('-').split('=')
61 name = split[0]
61 name = split[0]
62 # we use startswith because argparse accepts any arg to be specified
62 # we use startswith because argparse accepts any arg to be specified
63 # by any prefix, as long as it is unique,
63 # by any prefix, as long as it is unique,
64 # so `--no-br` means `--no-browser` in the notebook, etc.
64 # so `--no-br` means `--no-browser` in the notebook, etc.
65 if any(alias.startswith(name) for alias in aliases):
65 if any(alias.startswith(name) for alias in aliases):
66 stripped.remove(a)
66 stripped.remove(a)
67 if len(split) == 1:
67 if len(split) == 1:
68 # alias passed with arg via space
68 # alias passed with arg via space
69 swallow_next = True
69 swallow_next = True
70 # could have been a flag that matches an alias, e.g. `existing`
70 # could have been a flag that matches an alias, e.g. `existing`
71 # in which case, we might not swallow the next arg
71 # in which case, we might not swallow the next arg
72 was_flag = name in flags
72 was_flag = name in flags
73 elif len(split) == 1 and any(flag.startswith(name) for flag in flags):
73 elif len(split) == 1 and any(flag.startswith(name) for flag in flags):
74 # strip flag, but don't swallow next, as flags don't take args
74 # strip flag, but don't swallow next, as flags don't take args
75 stripped.remove(a)
75 stripped.remove(a)
76
76
77 # return shortened list
77 # return shortened list
78 return stripped
78 return stripped
79
79
80
80
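Concretely, swallow_argv removes any argument whose name is a prefix of a known alias or flag, and for space-separated aliases it also removes the following value. A quick illustration with hypothetical notebook-style options:

    argv = ['--port=8888', '--no-browser', '--profile', 'demo', 'script.py']
    swallow_argv(argv, aliases={'port', 'profile'}, flags={'no-browser'})
    # -> ['script.py']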
81 def make_ipkernel_cmd(mod='IPython.kernel', executable=None, extra_arguments=[], **kw):
81 def make_ipkernel_cmd(mod='ipython_kernel', executable=None, extra_arguments=[], **kw):
82 """Build Popen command list for launching an IPython kernel.
82 """Build Popen command list for launching an IPython kernel.
83
83
84 Parameters
84 Parameters
85 ----------
85 ----------
86 mod : str, optional (default 'IPython.kernel')
86 mod : str, optional (default 'ipython_kernel')
87 A string of an IPython module whose __main__ starts an IPython kernel
87 A string of an IPython module whose __main__ starts an IPython kernel
88
88
89 executable : str, optional (default sys.executable)
89 executable : str, optional (default sys.executable)
90 The Python executable to use for the kernel process.
90 The Python executable to use for the kernel process.
91
91
92 extra_arguments : list, optional
92 extra_arguments : list, optional
93 A list of extra arguments to pass when executing the launch code.
93 A list of extra arguments to pass when executing the launch code.
94
94
95 Returns
95 Returns
96 -------
96 -------
97
97
98 A Popen command list
98 A Popen command list
99 """
99 """
100 if executable is None:
100 if executable is None:
101 executable = sys.executable
101 executable = sys.executable
102 arguments = [ executable, '-m', mod, '-f', '{connection_file}' ]
102 arguments = [ executable, '-m', mod, '-f', '{connection_file}' ]
103 arguments.extend(extra_arguments)
103 arguments.extend(extra_arguments)
104
104
105 return arguments
105 return arguments
106
106
107
107
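For reference, the resulting command list interpolates the running interpreter and leaves the connection file as a placeholder for the caller to format; the interpreter path below is illustrative:

    cmd = make_ipkernel_cmd(extra_arguments=['--debug'])
    # e.g. ['/usr/bin/python3', '-m', 'ipython_kernel', '-f', '{connection_file}', '--debug']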
108 def launch_kernel(cmd, stdin=None, stdout=None, stderr=None, env=None,
108 def launch_kernel(cmd, stdin=None, stdout=None, stderr=None, env=None,
109 independent=False,
109 independent=False,
110 cwd=None,
110 cwd=None,
111 **kw
111 **kw
112 ):
112 ):
113 """ Launches a localhost kernel, binding to the ports given in its connection file.
113 """ Launches a localhost kernel, binding to the ports given in its connection file.
114
114
115 Parameters
115 Parameters
116 ----------
116 ----------
117 cmd : Popen list,
117 cmd : Popen list,
118 The command list used to launch the kernel, e.g. as built by make_ipkernel_cmd.
118 The command list used to launch the kernel, e.g. as built by make_ipkernel_cmd.
119
119
120 stdin, stdout, stderr : optional (default None)
120 stdin, stdout, stderr : optional (default None)
121 Standard streams, as defined in subprocess.Popen.
121 Standard streams, as defined in subprocess.Popen.
122
122
123 independent : bool, optional (default False)
123 independent : bool, optional (default False)
124 If set, the kernel process is guaranteed to survive if this process
124 If set, the kernel process is guaranteed to survive if this process
125 dies. If not set, an effort is made to ensure that the kernel is killed
125 dies. If not set, an effort is made to ensure that the kernel is killed
126 when this process dies. Note that in this case it is still good practice
126 when this process dies. Note that in this case it is still good practice
127 to kill kernels manually before exiting.
127 to kill kernels manually before exiting.
128
128
129 cwd : path, optional
129 cwd : path, optional
130 The working dir of the kernel process (default: cwd of this process).
130 The working dir of the kernel process (default: cwd of this process).
131
131
132 Returns
132 Returns
133 -------
133 -------
134
134
135 Popen instance for the kernel subprocess
135 Popen instance for the kernel subprocess
136 """
136 """
137
137
138 # Popen will fail (sometimes with a deadlock) if stdin, stdout, and stderr
138 # Popen will fail (sometimes with a deadlock) if stdin, stdout, and stderr
139 # are invalid. Unfortunately, there is in general no way to detect whether
139 # are invalid. Unfortunately, there is in general no way to detect whether
140 # they are valid. The following two blocks redirect them to (temporary)
140 # they are valid. The following two blocks redirect them to (temporary)
141 # pipes in certain important cases.
141 # pipes in certain important cases.
142
142
143 # If this process has been backgrounded, our stdin is invalid. Since there
143 # If this process has been backgrounded, our stdin is invalid. Since there
144 # is no compelling reason for the kernel to inherit our stdin anyway, we'll
144 # is no compelling reason for the kernel to inherit our stdin anyway, we'll
145 # play it safe and always redirect.
145 # play it safe and always redirect.
146 redirect_in = True
146 redirect_in = True
147 _stdin = PIPE if stdin is None else stdin
147 _stdin = PIPE if stdin is None else stdin
148
148
149 # If this process is running on pythonw, we know that stdin, stdout, and
149 # If this process is running on pythonw, we know that stdin, stdout, and
150 # stderr are all invalid.
150 # stderr are all invalid.
151 redirect_out = sys.executable.endswith('pythonw.exe')
151 redirect_out = sys.executable.endswith('pythonw.exe')
152 if redirect_out:
152 if redirect_out:
153 blackhole = open(os.devnull, 'w')
153 blackhole = open(os.devnull, 'w')
154 _stdout = blackhole if stdout is None else stdout
154 _stdout = blackhole if stdout is None else stdout
155 _stderr = blackhole if stderr is None else stderr
155 _stderr = blackhole if stderr is None else stderr
156 else:
156 else:
157 _stdout, _stderr = stdout, stderr
157 _stdout, _stderr = stdout, stderr
158
158
159 env = env if (env is not None) else os.environ.copy()
159 env = env if (env is not None) else os.environ.copy()
160
160
161 encoding = getdefaultencoding(prefer_stream=False)
161 encoding = getdefaultencoding(prefer_stream=False)
162 kwargs = dict(
162 kwargs = dict(
163 stdin=_stdin,
163 stdin=_stdin,
164 stdout=_stdout,
164 stdout=_stdout,
165 stderr=_stderr,
165 stderr=_stderr,
166 cwd=cwd,
166 cwd=cwd,
167 env=env,
167 env=env,
168 )
168 )
169
169
170 # Spawn a kernel.
170 # Spawn a kernel.
171 if sys.platform == 'win32':
171 if sys.platform == 'win32':
172 # Popen on Python 2 on Windows cannot handle unicode args or cwd
172 # Popen on Python 2 on Windows cannot handle unicode args or cwd
173 cmd = [ cast_bytes_py2(c, encoding) for c in cmd ]
173 cmd = [ cast_bytes_py2(c, encoding) for c in cmd ]
174 if cwd:
174 if cwd:
175 cwd = cast_bytes_py2(cwd, sys.getfilesystemencoding() or 'ascii')
175 cwd = cast_bytes_py2(cwd, sys.getfilesystemencoding() or 'ascii')
176 kwargs['cwd'] = cwd
176 kwargs['cwd'] = cwd
177
177
178 from jupyter_client.parentpoller import ParentPollerWindows
178 from jupyter_client.parentpoller import ParentPollerWindows
179 # Create a Win32 event for interrupting the kernel
179 # Create a Win32 event for interrupting the kernel
180 # and store it in an environment variable.
180 # and store it in an environment variable.
181 interrupt_event = ParentPollerWindows.create_interrupt_event()
181 interrupt_event = ParentPollerWindows.create_interrupt_event()
182 env["JPY_INTERRUPT_EVENT"] = str(interrupt_event)
182 env["JPY_INTERRUPT_EVENT"] = str(interrupt_event)
183 # deprecated old env name:
183 # deprecated old env name:
184 env["IPY_INTERRUPT_EVENT"] = env["JPY_INTERRUPT_EVENT"]
184 env["IPY_INTERRUPT_EVENT"] = env["JPY_INTERRUPT_EVENT"]
185
185
186 try:
186 try:
187 from _winapi import DuplicateHandle, GetCurrentProcess, \
187 from _winapi import DuplicateHandle, GetCurrentProcess, \
188 DUPLICATE_SAME_ACCESS, CREATE_NEW_PROCESS_GROUP
188 DUPLICATE_SAME_ACCESS, CREATE_NEW_PROCESS_GROUP
189 except:
189 except:
190 from _subprocess import DuplicateHandle, GetCurrentProcess, \
190 from _subprocess import DuplicateHandle, GetCurrentProcess, \
191 DUPLICATE_SAME_ACCESS, CREATE_NEW_PROCESS_GROUP
191 DUPLICATE_SAME_ACCESS, CREATE_NEW_PROCESS_GROUP
192 # Launch the kernel process
192 # Launch the kernel process
193 if independent:
193 if independent:
194 kwargs['creationflags'] = CREATE_NEW_PROCESS_GROUP
194 kwargs['creationflags'] = CREATE_NEW_PROCESS_GROUP
195 else:
195 else:
196 pid = GetCurrentProcess()
196 pid = GetCurrentProcess()
197 handle = DuplicateHandle(pid, pid, pid, 0,
197 handle = DuplicateHandle(pid, pid, pid, 0,
198 True, # Inheritable by new processes.
198 True, # Inheritable by new processes.
199 DUPLICATE_SAME_ACCESS)
199 DUPLICATE_SAME_ACCESS)
200 env['JPY_PARENT_PID'] = str(int(handle))
200 env['JPY_PARENT_PID'] = str(int(handle))
201
201
202 proc = Popen(cmd, **kwargs)
202 proc = Popen(cmd, **kwargs)
203
203
204 # Attach the interrupt event to the Popen object so it can be used later.
204 # Attach the interrupt event to the Popen object so it can be used later.
205 proc.win32_interrupt_event = interrupt_event
205 proc.win32_interrupt_event = interrupt_event
206
206
207 else:
207 else:
208 if independent:
208 if independent:
209 kwargs['preexec_fn'] = lambda: os.setsid()
209 kwargs['preexec_fn'] = lambda: os.setsid()
210 else:
210 else:
211 env['JPY_PARENT_PID'] = str(os.getpid())
211 env['JPY_PARENT_PID'] = str(os.getpid())
212
212
213 proc = Popen(cmd, **kwargs)
213 proc = Popen(cmd, **kwargs)
214
214
215 # Clean up pipes created to work around Popen bug.
215 # Clean up pipes created to work around Popen bug.
216 if redirect_in:
216 if redirect_in:
217 if stdin is None:
217 if stdin is None:
218 proc.stdin.close()
218 proc.stdin.close()
219
219
220 return proc
220 return proc
221
221
222 __all__ = [
222 __all__ = [
223 'swallow_argv',
223 'swallow_argv',
224 'make_ipkernel_cmd',
224 'make_ipkernel_cmd',
225 'launch_kernel',
225 'launch_kernel',
226 ]
226 ]
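Putting the two helpers together, a caller typically formats the connection-file placeholder and then hands the list to launch_kernel; the connection-file path here is hypothetical:

    cmd = make_ipkernel_cmd()
    cmd = [c.format(connection_file='/tmp/kernel-demo.json') for c in cmd]
    proc = launch_kernel(cmd, cwd='/tmp')    # returns a subprocess.Popen instance
    # ... later, shut the kernel down:
    proc.terminate()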