Show More
@@ -0,0 +1,226 b'' | |||
|
1 | """Utilities for launching kernels | |
|
2 | """ | |
|
3 | ||
|
4 | # Copyright (c) IPython Development Team. | |
|
5 | # Distributed under the terms of the Modified BSD License. | |
|
6 | ||
|
7 | import os | |
|
8 | import sys | |
|
9 | from subprocess import Popen, PIPE | |
|
10 | ||
|
11 | from IPython.utils.encoding import getdefaultencoding | |
|
12 | from IPython.utils.py3compat import cast_bytes_py2 | |
|
13 | ||
|
14 | ||
|
15 | def swallow_argv(argv, aliases=None, flags=None): | |
|
16 | """strip frontend-specific aliases and flags from an argument list | |
|
17 | ||
|
18 | For use primarily in frontend apps that want to pass a subset of command-line | |
|
19 | arguments through to a subprocess, where frontend-specific flags and aliases | |
|
20 | should be removed from the list. | |
|
21 | ||
|
22 | Parameters | |
|
23 | ---------- | |
|
24 | ||
|
25 | argv : list(str) | |
|
26 | The starting argv, to be filtered | |
|
27 | aliases : container of aliases (dict, list, set, etc.) | |
|
28 | The frontend-specific aliases to be removed | |
|
29 | flags : container of flags (dict, list, set, etc.) | |
|
30 | The frontend-specific flags to be removed | |
|
31 | ||
|
32 | Returns | |
|
33 | ------- | |
|
34 | ||
|
35 | argv : list(str) | |
|
36 | The argv list, excluding flags and aliases that have been stripped | |
|
37 | """ | |
|
38 | ||
|
39 | if aliases is None: | |
|
40 | aliases = set() | |
|
41 | if flags is None: | |
|
42 | flags = set() | |
|
43 | ||
|
44 | stripped = list(argv) # copy | |
|
45 | ||
|
46 | swallow_next = False | |
|
47 | was_flag = False | |
|
48 | for a in argv: | |
|
49 | if a == '--': | |
|
50 | break | |
|
51 | if swallow_next: | |
|
52 | swallow_next = False | |
|
53 | # last arg was an alias, remove the next one | |
|
54 | # *unless* the last alias has a no-arg flag version, in which | |
|
55 | # case, don't swallow the next arg if it's also a flag: | |
|
56 | if not (was_flag and a.startswith('-')): | |
|
57 | stripped.remove(a) | |
|
58 | continue | |
|
59 | if a.startswith('-'): | |
|
60 | split = a.lstrip('-').split('=') | |
|
61 | name = split[0] | |
|
62 | # we use startswith because argparse accepts any arg to be specified | |
|
63 | # by any leading section, as long as it is unique, | |
|
64 | # so `--no-br` means `--no-browser` in the notebook, etc. | |
|
65 | if any(alias.startswith(name) for alias in aliases): | |
|
66 | stripped.remove(a) | |
|
67 | if len(split) == 1: | |
|
68 | # alias passed with arg via space | |
|
69 | swallow_next = True | |
|
70 | # could have been a flag that matches an alias, e.g. `existing` | |
|
71 | # in which case, we might not swallow the next arg | |
|
72 | was_flag = name in flags | |
|
73 | elif len(split) == 1 and any(flag.startswith(name) for flag in flags): | |
|
74 | # strip flag, but don't swallow next, as flags don't take args | |
|
75 | stripped.remove(a) | |
|
76 | ||
|
77 | # return shortened list | |
|
78 | return stripped | |
|
79 | ||
|
80 | ||
|
81 | def make_ipkernel_cmd(mod='ipython_kernel', executable=None, extra_arguments=[], **kw): | |
|
82 | """Build Popen command list for launching an IPython kernel. | |
|
83 | ||
|
84 | Parameters | |
|
85 | ---------- | |
|
86 | mod : str, optional (default 'ipython_kernel') | |
|
87 | A string of an IPython module whose __main__ starts an IPython kernel | |
|
88 | ||
|
89 | executable : str, optional (default sys.executable) | |
|
90 | The Python executable to use for the kernel process. | |
|
91 | ||
|
92 | extra_arguments : list, optional | |
|
93 | A list of extra arguments to pass when executing the launch code. | |
|
94 | ||
|
95 | Returns | |
|
96 | ------- | |
|
97 | ||
|
98 | A Popen command list | |
|
99 | """ | |
|
100 | if executable is None: | |
|
101 | executable = sys.executable | |
|
102 | arguments = [ executable, '-m', mod, '-f', '{connection_file}' ] | |
|
103 | arguments.extend(extra_arguments) | |
|
104 | ||
|
105 | return arguments | |
|
106 | ||
|
107 | ||
|
108 | def launch_kernel(cmd, stdin=None, stdout=None, stderr=None, env=None, | |
|
109 | independent=False, | |
|
110 | cwd=None, | |
|
111 | **kw | |
|
112 | ): | |
|
113 | """ Launches a localhost kernel, binding to the specified ports. | |
|
114 | ||
|
115 | Parameters | |
|
116 | ---------- | |
|
117 | cmd : Popen list, | |
|
118 | A string of Python code that imports and executes a kernel entry point. | |
|
119 | ||
|
120 | stdin, stdout, stderr : optional (default None) | |
|
121 | Standards streams, as defined in subprocess.Popen. | |
|
122 | ||
|
123 | independent : bool, optional (default False) | |
|
124 | If set, the kernel process is guaranteed to survive if this process | |
|
125 | dies. If not set, an effort is made to ensure that the kernel is killed | |
|
126 | when this process dies. Note that in this case it is still good practice | |
|
127 | to kill kernels manually before exiting. | |
|
128 | ||
|
129 | cwd : path, optional | |
|
130 | The working dir of the kernel process (default: cwd of this process). | |
|
131 | ||
|
132 | Returns | |
|
133 | ------- | |
|
134 | ||
|
135 | Popen instance for the kernel subprocess | |
|
136 | """ | |
|
137 | ||
|
138 | # Popen will fail (sometimes with a deadlock) if stdin, stdout, and stderr | |
|
139 | # are invalid. Unfortunately, there is in general no way to detect whether | |
|
140 | # they are valid. The following two blocks redirect them to (temporary) | |
|
141 | # pipes in certain important cases. | |
|
142 | ||
|
143 | # If this process has been backgrounded, our stdin is invalid. Since there | |
|
144 | # is no compelling reason for the kernel to inherit our stdin anyway, we'll | |
|
145 | # place this one safe and always redirect. | |
|
146 | redirect_in = True | |
|
147 | _stdin = PIPE if stdin is None else stdin | |
|
148 | ||
|
149 | # If this process is running on pythonw, we know that stdin, stdout, and | |
|
150 | # stderr are all invalid. | |
|
151 | redirect_out = sys.executable.endswith('pythonw.exe') | |
|
152 | if redirect_out: | |
|
153 | blackhole = open(os.devnull, 'w') | |
|
154 | _stdout = blackhole if stdout is None else stdout | |
|
155 | _stderr = blackhole if stderr is None else stderr | |
|
156 | else: | |
|
157 | _stdout, _stderr = stdout, stderr | |
|
158 | ||
|
159 | env = env if (env is not None) else os.environ.copy() | |
|
160 | ||
|
161 | encoding = getdefaultencoding(prefer_stream=False) | |
|
162 | kwargs = dict( | |
|
163 | stdin=_stdin, | |
|
164 | stdout=_stdout, | |
|
165 | stderr=_stderr, | |
|
166 | cwd=cwd, | |
|
167 | env=env, | |
|
168 | ) | |
|
169 | ||
|
170 | # Spawn a kernel. | |
|
171 | if sys.platform == 'win32': | |
|
172 | # Popen on Python 2 on Windows cannot handle unicode args or cwd | |
|
173 | cmd = [ cast_bytes_py2(c, encoding) for c in cmd ] | |
|
174 | if cwd: | |
|
175 | cwd = cast_bytes_py2(cwd, sys.getfilesystemencoding() or 'ascii') | |
|
176 | kwargs['cwd'] = cwd | |
|
177 | ||
|
178 | from jupyter_client.parentpoller import ParentPollerWindows | |
|
179 | # Create a Win32 event for interrupting the kernel | |
|
180 | # and store it in an environment variable. | |
|
181 | interrupt_event = ParentPollerWindows.create_interrupt_event() | |
|
182 | env["JPY_INTERRUPT_EVENT"] = str(interrupt_event) | |
|
183 | # deprecated old env name: | |
|
184 | env["IPY_INTERRUPT_EVENT"] = env["JPY_INTERRUPT_EVENT"] | |
|
185 | ||
|
186 | try: | |
|
187 | from _winapi import DuplicateHandle, GetCurrentProcess, \ | |
|
188 | DUPLICATE_SAME_ACCESS, CREATE_NEW_PROCESS_GROUP | |
|
189 | except: | |
|
190 | from _subprocess import DuplicateHandle, GetCurrentProcess, \ | |
|
191 | DUPLICATE_SAME_ACCESS, CREATE_NEW_PROCESS_GROUP | |
|
192 | # Launch the kernel process | |
|
193 | if independent: | |
|
194 | kwargs['creationflags'] = CREATE_NEW_PROCESS_GROUP | |
|
195 | else: | |
|
196 | pid = GetCurrentProcess() | |
|
197 | handle = DuplicateHandle(pid, pid, pid, 0, | |
|
198 | True, # Inheritable by new processes. | |
|
199 | DUPLICATE_SAME_ACCESS) | |
|
200 | env['JPY_PARENT_PID'] = str(int(handle)) | |
|
201 | ||
|
202 | proc = Popen(cmd, **kwargs) | |
|
203 | ||
|
204 | # Attach the interrupt event to the Popen object so it can be used later. | |
|
205 | proc.win32_interrupt_event = interrupt_event | |
|
206 | ||
|
207 | else: | |
|
208 | if independent: | |
|
209 | kwargs['preexec_fn'] = lambda: os.setsid() | |
|
210 | else: | |
|
211 | env['JPY_PARENT_PID'] = str(os.getpid()) | |
|
212 | ||
|
213 | proc = Popen(cmd, **kwargs) | |
|
214 | ||
|
215 | # Clean up pipes created to work around Popen bug. | |
|
216 | if redirect_in: | |
|
217 | if stdin is None: | |
|
218 | proc.stdin.close() | |
|
219 | ||
|
220 | return proc | |
|
221 | ||
|
222 | __all__ = [ | |
|
223 | 'swallow_argv', | |
|
224 | 'make_ipkernel_cmd', | |
|
225 | 'launch_kernel', | |
|
226 | ] |
@@ -1,330 +1,331 b'' | |||
|
1 | 1 | """ A minimal application base mixin for all ZMQ based IPython frontends. |
|
2 | 2 | |
|
3 | 3 | This is not a complete console app, as subprocess will not be able to receive |
|
4 | 4 | input, there is no real readline support, among other limitations. This is a |
|
5 | 5 | refactoring of what used to be the IPython/qt/console/qtconsoleapp.py |
|
6 | 6 | """ |
|
7 | 7 | # Copyright (c) IPython Development Team. |
|
8 | 8 | # Distributed under the terms of the Modified BSD License. |
|
9 | 9 | |
|
10 | 10 | import atexit |
|
11 | 11 | import os |
|
12 | 12 | import signal |
|
13 | 13 | import sys |
|
14 | 14 | import uuid |
|
15 | 15 | |
|
16 | 16 | |
|
17 | 17 | from IPython.config.application import boolean_flag |
|
18 | 18 | from IPython.core.profiledir import ProfileDir |
|
19 | 19 | from IPython.kernel.blocking import BlockingKernelClient |
|
20 | 20 | from IPython.kernel import KernelManager |
|
21 | 21 | from IPython.kernel import tunnel_to_kernel, find_connection_file |
|
22 | 22 | from IPython.kernel.kernelspec import NoSuchKernel |
|
23 | 23 | from IPython.utils.path import filefind |
|
24 | 24 | from IPython.utils.traitlets import ( |
|
25 | 25 | Dict, List, Unicode, CUnicode, CBool, Any |
|
26 | 26 | ) |
|
27 | 27 | from IPython.kernel.zmq.session import Session |
|
28 |
from IPython.kernel |
|
|
28 | from IPython.kernel import connect | |
|
29 | ConnectionFileMixin = connect.ConnectionFileMixin | |
|
29 | 30 | |
|
30 | 31 | from IPython.utils.localinterfaces import localhost |
|
31 | 32 | |
|
32 | 33 | #----------------------------------------------------------------------------- |
|
33 | 34 | # Aliases and Flags |
|
34 | 35 | #----------------------------------------------------------------------------- |
|
35 | 36 | |
|
36 | 37 | flags = {} |
|
37 | 38 | |
|
38 | 39 | # the flags that are specific to the frontend |
|
39 | 40 | # these must be scrubbed before being passed to the kernel, |
|
40 | 41 | # or it will raise an error on unrecognized flags |
|
41 | 42 | app_flags = { |
|
42 | 43 | 'existing' : ({'IPythonConsoleApp' : {'existing' : 'kernel*.json'}}, |
|
43 | 44 | "Connect to an existing kernel. If no argument specified, guess most recent"), |
|
44 | 45 | } |
|
45 | 46 | app_flags.update(boolean_flag( |
|
46 | 47 | 'confirm-exit', 'IPythonConsoleApp.confirm_exit', |
|
47 | 48 | """Set to display confirmation dialog on exit. You can always use 'exit' or 'quit', |
|
48 | 49 | to force a direct exit without any confirmation. |
|
49 | 50 | """, |
|
50 | 51 | """Don't prompt the user when exiting. This will terminate the kernel |
|
51 | 52 | if it is owned by the frontend, and leave it alive if it is external. |
|
52 | 53 | """ |
|
53 | 54 | )) |
|
54 | 55 | flags.update(app_flags) |
|
55 | 56 | |
|
56 | 57 | aliases = {} |
|
57 | 58 | |
|
58 | 59 | # also scrub aliases from the frontend |
|
59 | 60 | app_aliases = dict( |
|
60 | 61 | ip = 'IPythonConsoleApp.ip', |
|
61 | 62 | transport = 'IPythonConsoleApp.transport', |
|
62 | 63 | hb = 'IPythonConsoleApp.hb_port', |
|
63 | 64 | shell = 'IPythonConsoleApp.shell_port', |
|
64 | 65 | iopub = 'IPythonConsoleApp.iopub_port', |
|
65 | 66 | stdin = 'IPythonConsoleApp.stdin_port', |
|
66 | 67 | existing = 'IPythonConsoleApp.existing', |
|
67 | 68 | f = 'IPythonConsoleApp.connection_file', |
|
68 | 69 | |
|
69 | 70 | kernel = 'IPythonConsoleApp.kernel_name', |
|
70 | 71 | |
|
71 | 72 | ssh = 'IPythonConsoleApp.sshserver', |
|
72 | 73 | ) |
|
73 | 74 | aliases.update(app_aliases) |
|
74 | 75 | |
|
75 | 76 | #----------------------------------------------------------------------------- |
|
76 | 77 | # Classes |
|
77 | 78 | #----------------------------------------------------------------------------- |
|
78 | 79 | |
|
79 | 80 | classes = [KernelManager, ProfileDir, Session] |
|
80 | 81 | |
|
81 | 82 | class IPythonConsoleApp(ConnectionFileMixin): |
|
82 | 83 | name = 'ipython-console-mixin' |
|
83 | 84 | |
|
84 | 85 | description = """ |
|
85 | 86 | The IPython Mixin Console. |
|
86 | 87 | |
|
87 | 88 | This class contains the common portions of console client (QtConsole, |
|
88 | 89 | ZMQ-based terminal console, etc). It is not a full console, in that |
|
89 | 90 | launched terminal subprocesses will not be able to accept input. |
|
90 | 91 | |
|
91 | 92 | The Console using this mixin supports various extra features beyond
|
92 | 93 | the single-process Terminal IPython shell, such as connecting to |
|
93 | 94 | existing kernel, via: |
|
94 | 95 | |
|
95 | 96 | ipython <appname> --existing |
|
96 | 97 | |
|
97 | 98 | as well as tunnel via SSH |
|
98 | 99 | |
|
99 | 100 | """ |
|
100 | 101 | |
|
101 | 102 | classes = classes |
|
102 | 103 | flags = Dict(flags) |
|
103 | 104 | aliases = Dict(aliases) |
|
104 | 105 | kernel_manager_class = KernelManager |
|
105 | 106 | kernel_client_class = BlockingKernelClient |
|
106 | 107 | |
|
107 | 108 | kernel_argv = List(Unicode) |
|
108 | 109 | # frontend flags&aliases to be stripped when building kernel_argv |
|
109 | 110 | frontend_flags = Any(app_flags) |
|
110 | 111 | frontend_aliases = Any(app_aliases) |
|
111 | 112 | |
|
112 | 113 | # create requested profiles by default, if they don't exist: |
|
113 | 114 | auto_create = CBool(True) |
|
114 | 115 | # connection info: |
|
115 | 116 | |
|
116 | 117 | sshserver = Unicode('', config=True, |
|
117 | 118 | help="""The SSH server to use to connect to the kernel.""") |
|
118 | 119 | sshkey = Unicode('', config=True, |
|
119 | 120 | help="""Path to the ssh key to use for logging in to the ssh server.""") |
|
120 | 121 | |
|
121 | 122 | def _connection_file_default(self): |
|
122 | 123 | return 'kernel-%i.json' % os.getpid() |
|
123 | 124 | |
|
124 | 125 | existing = CUnicode('', config=True, |
|
125 | 126 | help="""Connect to an already running kernel""") |
|
126 | 127 | |
|
127 | 128 | kernel_name = Unicode('python', config=True, |
|
128 | 129 | help="""The name of the default kernel to start.""") |
|
129 | 130 | |
|
130 | 131 | confirm_exit = CBool(True, config=True, |
|
131 | 132 | help=""" |
|
132 | 133 | Set to display confirmation dialog on exit. You can always use 'exit' or 'quit', |
|
133 | 134 | to force a direct exit without any confirmation.""", |
|
134 | 135 | ) |
|
135 | 136 | |
|
136 | 137 | def build_kernel_argv(self, argv=None): |
|
137 | 138 | """build argv to be passed to kernel subprocess |
|
138 | 139 | |
|
139 | 140 | Override in subclasses if any args should be passed to the kernel |
|
140 | 141 | """ |
|
141 | 142 | self.kernel_argv = self.extra_args |
|
142 | 143 | |
|
143 | 144 | def init_connection_file(self): |
|
144 | 145 | """find the connection file, and load the info if found. |
|
145 | 146 | |
|
146 | 147 | The current working directory and the current profile's security |
|
147 | 148 | directory will be searched for the file if it is not given by |
|
148 | 149 | absolute path. |
|
149 | 150 | |
|
150 | 151 | When attempting to connect to an existing kernel and the `--existing` |
|
151 | 152 | argument does not match an existing file, it will be interpreted as a |
|
152 | 153 | fileglob, and the matching file in the current profile's security dir |
|
153 | 154 | with the latest access time will be used. |
|
154 | 155 | |
|
155 | 156 | After this method is called, self.connection_file contains the *full path* |
|
156 | 157 | to the connection file, never just its name. |
|
157 | 158 | """ |
|
158 | 159 | if self.existing: |
|
159 | 160 | try: |
|
160 | 161 | cf = find_connection_file(self.existing) |
|
161 | 162 | except Exception: |
|
162 | 163 | self.log.critical("Could not find existing kernel connection file %s", self.existing) |
|
163 | 164 | self.exit(1) |
|
164 | 165 | self.log.debug("Connecting to existing kernel: %s" % cf) |
|
165 | 166 | self.connection_file = cf |
|
166 | 167 | else: |
|
167 | 168 | # not existing, check if we are going to write the file |
|
168 | 169 | # and ensure that self.connection_file is a full path, not just the shortname |
|
169 | 170 | try: |
|
170 | 171 | cf = find_connection_file(self.connection_file) |
|
171 | 172 | except Exception: |
|
172 | 173 | # file might not exist |
|
173 | 174 | if self.connection_file == os.path.basename(self.connection_file): |
|
174 | 175 | # just shortname, put it in security dir |
|
175 | 176 | cf = os.path.join(self.profile_dir.security_dir, self.connection_file) |
|
176 | 177 | else: |
|
177 | 178 | cf = self.connection_file |
|
178 | 179 | self.connection_file = cf |
|
179 | 180 | try: |
|
180 | 181 | self.connection_file = filefind(self.connection_file, ['.', self.profile_dir.security_dir]) |
|
181 | 182 | except IOError: |
|
182 | 183 | self.log.debug("Connection File not found: %s", self.connection_file) |
|
183 | 184 | return |
|
184 | 185 | |
|
185 | 186 | # should load_connection_file only be used for existing? |
|
186 | 187 | # as it is now, this allows reusing ports if an existing |
|
187 | 188 | # file is requested |
|
188 | 189 | try: |
|
189 | 190 | self.load_connection_file() |
|
190 | 191 | except Exception: |
|
191 | 192 | self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True) |
|
192 | 193 | self.exit(1) |
|
193 | 194 | |
|
194 | 195 | def init_ssh(self): |
|
195 | 196 | """set up ssh tunnels, if needed.""" |
|
196 | 197 | if not self.existing or (not self.sshserver and not self.sshkey): |
|
197 | 198 | return |
|
198 | 199 | self.load_connection_file() |
|
199 | 200 | |
|
200 | 201 | transport = self.transport |
|
201 | 202 | ip = self.ip |
|
202 | 203 | |
|
203 | 204 | if transport != 'tcp': |
|
204 | 205 | self.log.error("Can only use ssh tunnels with TCP sockets, not %s", transport) |
|
205 | 206 | sys.exit(-1) |
|
206 | 207 | |
|
207 | 208 | if self.sshkey and not self.sshserver: |
|
208 | 209 | # specifying just the key implies that we are connecting directly |
|
209 | 210 | self.sshserver = ip |
|
210 | 211 | ip = localhost() |
|
211 | 212 | |
|
212 | 213 | # build connection dict for tunnels: |
|
213 | 214 | info = dict(ip=ip, |
|
214 | 215 | shell_port=self.shell_port, |
|
215 | 216 | iopub_port=self.iopub_port, |
|
216 | 217 | stdin_port=self.stdin_port, |
|
217 | 218 | hb_port=self.hb_port |
|
218 | 219 | ) |
|
219 | 220 | |
|
220 | 221 | self.log.info("Forwarding connections to %s via %s"%(ip, self.sshserver)) |
|
221 | 222 | |
|
222 | 223 | # tunnels return a new set of ports, which will be on localhost: |
|
223 | 224 | self.ip = localhost() |
|
224 | 225 | try: |
|
225 | 226 | newports = tunnel_to_kernel(info, self.sshserver, self.sshkey) |
|
226 | 227 | except: |
|
227 | 228 | # even catch KeyboardInterrupt |
|
228 | 229 | self.log.error("Could not setup tunnels", exc_info=True) |
|
229 | 230 | self.exit(1) |
|
230 | 231 | |
|
231 | 232 | self.shell_port, self.iopub_port, self.stdin_port, self.hb_port = newports |
|
232 | 233 | |
|
233 | 234 | cf = self.connection_file |
|
234 | 235 | base,ext = os.path.splitext(cf) |
|
235 | 236 | base = os.path.basename(base) |
|
236 | 237 | self.connection_file = os.path.basename(base)+'-ssh'+ext |
|
237 | 238 | self.log.info("To connect another client via this tunnel, use:") |
|
238 | 239 | self.log.info("--existing %s" % self.connection_file) |
|
239 | 240 | |
|
240 | 241 | def _new_connection_file(self): |
|
241 | 242 | cf = '' |
|
242 | 243 | while not cf: |
|
243 | 244 | # we don't need a 128b id to distinguish kernels, use more readable |
|
244 | 245 | # 48b node segment (12 hex chars). Users running more than 32k simultaneous |
|
245 | 246 | # kernels can subclass. |
|
246 | 247 | ident = str(uuid.uuid4()).split('-')[-1] |
|
247 | 248 | cf = os.path.join(self.profile_dir.security_dir, 'kernel-%s.json' % ident) |
|
248 | 249 | # only keep if it's actually new. Protect against unlikely collision |
|
249 | 250 | # in 48b random search space |
|
250 | 251 | cf = cf if not os.path.exists(cf) else '' |
|
251 | 252 | return cf |
|
252 | 253 | |
|
253 | 254 | def init_kernel_manager(self): |
|
254 | 255 | # Don't let Qt or ZMQ swallow KeyboardInterrupts.
|
255 | 256 | if self.existing: |
|
256 | 257 | self.kernel_manager = None |
|
257 | 258 | return |
|
258 | 259 | signal.signal(signal.SIGINT, signal.SIG_DFL) |
|
259 | 260 | |
|
260 | 261 | # Create a KernelManager and start a kernel. |
|
261 | 262 | try: |
|
262 | 263 | self.kernel_manager = self.kernel_manager_class( |
|
263 | 264 | ip=self.ip, |
|
264 | 265 | session=self.session, |
|
265 | 266 | transport=self.transport, |
|
266 | 267 | shell_port=self.shell_port, |
|
267 | 268 | iopub_port=self.iopub_port, |
|
268 | 269 | stdin_port=self.stdin_port, |
|
269 | 270 | hb_port=self.hb_port, |
|
270 | 271 | connection_file=self.connection_file, |
|
271 | 272 | kernel_name=self.kernel_name, |
|
272 | 273 | parent=self, |
|
273 | 274 | ipython_dir=self.ipython_dir, |
|
274 | 275 | ) |
|
275 | 276 | except NoSuchKernel: |
|
276 | 277 | self.log.critical("Could not find kernel %s", self.kernel_name) |
|
277 | 278 | self.exit(1) |
|
278 | 279 | |
|
279 | 280 | self.kernel_manager.client_factory = self.kernel_client_class |
|
280 | 281 | # FIXME: remove special treatment of IPython kernels |
|
281 | 282 | kwargs = {} |
|
282 | 283 | if self.kernel_manager.ipython_kernel: |
|
283 | 284 | kwargs['extra_arguments'] = self.kernel_argv |
|
284 | 285 | self.kernel_manager.start_kernel(**kwargs) |
|
285 | 286 | atexit.register(self.kernel_manager.cleanup_ipc_files) |
|
286 | 287 | |
|
287 | 288 | if self.sshserver: |
|
288 | 289 | # ssh, write new connection file |
|
289 | 290 | self.kernel_manager.write_connection_file() |
|
290 | 291 | |
|
291 | 292 | # in case KM defaults / ssh writing changes things: |
|
292 | 293 | km = self.kernel_manager |
|
293 | 294 | self.shell_port=km.shell_port |
|
294 | 295 | self.iopub_port=km.iopub_port |
|
295 | 296 | self.stdin_port=km.stdin_port |
|
296 | 297 | self.hb_port=km.hb_port |
|
297 | 298 | self.connection_file = km.connection_file |
|
298 | 299 | |
|
299 | 300 | atexit.register(self.kernel_manager.cleanup_connection_file) |
|
300 | 301 | |
|
301 | 302 | def init_kernel_client(self): |
|
302 | 303 | if self.kernel_manager is not None: |
|
303 | 304 | self.kernel_client = self.kernel_manager.client() |
|
304 | 305 | else: |
|
305 | 306 | self.kernel_client = self.kernel_client_class( |
|
306 | 307 | session=self.session, |
|
307 | 308 | ip=self.ip, |
|
308 | 309 | transport=self.transport, |
|
309 | 310 | shell_port=self.shell_port, |
|
310 | 311 | iopub_port=self.iopub_port, |
|
311 | 312 | stdin_port=self.stdin_port, |
|
312 | 313 | hb_port=self.hb_port, |
|
313 | 314 | connection_file=self.connection_file, |
|
314 | 315 | parent=self, |
|
315 | 316 | ) |
|
316 | 317 | |
|
317 | 318 | self.kernel_client.start_channels() |
|
318 | 319 | |
|
319 | 320 | |
|
320 | 321 | |
|
321 | 322 | def initialize(self, argv=None): |
|
322 | 323 | """ |
|
323 | 324 | Classes which mix this class in should call: |
|
324 | 325 | IPythonConsoleApp.initialize(self,argv) |
|
325 | 326 | """ |
|
326 | 327 | self.init_connection_file() |
|
327 | 328 | self.init_ssh() |
|
328 | 329 | self.init_kernel_manager() |
|
329 | 330 | self.init_kernel_client() |
|
330 | 331 |
@@ -1,67 +1,29 b'' | |||
|
1 | """IPython kernels and associated utilities | |
|
2 | ||
|
3 | For connecting to kernels, use jupyter_client | |
|
4 | 1 |
""" |
|
5 | ||
|
2 | Shim to maintain backwards compatibility with old IPython.kernel imports. | |
|
3 | """ | |
|
6 | 4 | # Copyright (c) IPython Development Team. |
|
7 | 5 | # Distributed under the terms of the Modified BSD License. |
|
8 | 6 | |
|
9 | # Shim to maintain backwards compatibility with old IPython.kernel imports. | |
|
10 | ||
|
11 | 7 | import sys |
|
12 | 8 | from warnings import warn |
|
13 | 9 | |
|
14 | 10 | warn("The `IPython.kernel` package has been deprecated. " |
|
15 | 11 | "You should import from ipython_kernel or jupyter_client instead.") |
|
16 | 12 | |
|
17 | from IPython.utils.shimmodule import ShimModule | |
|
18 | ||
|
19 | # Shims for jupyter_client | |
|
20 | # Can't do a single shim, because the package didn't move all together | |
|
21 | 13 | |
|
22 | for name in ( | |
|
23 | 'adapter', | |
|
24 | 'blocking', | |
|
25 | 'channels', | |
|
26 | 'channelsabc', | |
|
27 | 'client', | |
|
28 | 'clientabc', | |
|
29 | 'connect', | |
|
30 | 'ioloop', | |
|
31 | 'kernelspec', | |
|
32 | 'kernelspecapp', | |
|
33 | 'launcher', | |
|
34 | 'manager', | |
|
35 | 'managerabc', | |
|
36 | 'multikernelmanager', | |
|
37 | 'restarter', | |
|
38 | 'threaded', | |
|
39 | 'tests.test_adapter', | |
|
40 | 'tests.test_connect', | |
|
41 | 'tests.test_kernelmanager', | |
|
42 | 'tests.test_kernelspec', | |
|
43 | 'tests.test_launcher', | |
|
44 | 'tests.test_multikernelmanager', | |
|
45 | 'tests.test_public_api', | |
|
46 | ): | |
|
47 | sys.modules['IPython.kernel.%s' % name] = \ | |
|
48 | ShimModule(name, mirror='jupyter_client.%s' % name) | |
|
14 | from IPython.utils.shimmodule import ShimModule | |
|
49 | 15 | |
|
50 | # some files moved out of the zmq prefix | |
|
51 | for name in ( | |
|
52 | 'session', | |
|
53 | 'tests.test_session', | |
|
54 | ): | |
|
55 | sys.modules['IPython.kernel.zmq.%s' % name] = \ | |
|
56 | ShimModule(name, mirror='jupyter_client.%s' % name) | |
|
57 | # preserve top-level API modules, all from jupyter_client | |
|
16 | # session moved relative to top-level | |
|
17 | sys.modules['IPython.kernel.zmq.session'] = ShimModule('session', mirror='jupyter_client.session') | |
|
58 | 18 | |
|
59 | # just for friendlier zmq version check | |
|
60 | from . import zmq | |
|
19 | for pkg in ('comm', 'inprocess', 'resources', 'zmq'): | |
|
20 | sys.modules['IPython.kernel.%s' % pkg] = ShimModule(pkg, mirror='ipython_kernel.%s' % pkg) | |
|
21 | for pkg in ('ioloop', 'blocking'): | |
|
22 | sys.modules['IPython.kernel.%s' % pkg] = ShimModule(pkg, mirror='jupyter_client.%s' % pkg) | |
|
61 | 23 | |
|
62 | from jupyter_client.connect import * | |
|
63 | from jupyter_client.launcher import * | |
|
64 |
from jupyter_client |
|
|
65 | from jupyter_client.manager import KernelManager, run_kernel | |
|
66 | from jupyter_client.blocking import BlockingKernelClient | |
|
67 | from jupyter_client.multikernelmanager import MultiKernelManager | |
|
24 | # required for `from IPython.kernel import PKG` | |
|
25 | from ipython_kernel import comm, inprocess, resources, zmq | |
|
26 | from jupyter_client import ioloop, blocking | |
|
27 | # public API | |
|
28 | from ipython_kernel.connect import * | |
|
29 | from jupyter_client import * |
@@ -1,518 +1,520 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """IPython Test Suite Runner. |
|
3 | 3 | |
|
4 | 4 | This module provides a main entry point to a user script to test IPython |
|
5 | 5 | itself from the command line. There are two ways of running this script: |
|
6 | 6 | |
|
7 | 7 | 1. With the syntax `iptest all`. This runs our entire test suite by |
|
8 | 8 | calling this script (with different arguments) recursively. This |
|
9 | 9 | causes modules and package to be tested in different processes, using nose |
|
10 | 10 | or trial where appropriate. |
|
11 | 11 | 2. With the regular nose syntax, like `iptest -vvs IPython`. In this form |
|
12 | 12 | the script simply calls nose, but with special command line flags and |
|
13 | 13 | plugins loaded. |
|
14 | 14 | |
|
15 | 15 | """ |
|
16 | 16 | |
|
17 | 17 | # Copyright (c) IPython Development Team. |
|
18 | 18 | # Distributed under the terms of the Modified BSD License. |
|
19 | 19 | |
|
20 | 20 | from __future__ import print_function |
|
21 | 21 | |
|
22 | 22 | import glob |
|
23 | 23 | from io import BytesIO |
|
24 | 24 | import os |
|
25 | 25 | import os.path as path |
|
26 | 26 | import sys |
|
27 | 27 | from threading import Thread, Lock, Event |
|
28 | 28 | import warnings |
|
29 | 29 | |
|
30 | 30 | import nose.plugins.builtin |
|
31 | 31 | from nose.plugins.xunit import Xunit |
|
32 | 32 | from nose import SkipTest |
|
33 | 33 | from nose.core import TestProgram |
|
34 | 34 | from nose.plugins import Plugin |
|
35 | 35 | from nose.util import safe_str |
|
36 | 36 | |
|
37 | 37 | from IPython.utils.process import is_cmd_found |
|
38 | 38 | from IPython.utils.py3compat import bytes_to_str |
|
39 | 39 | from IPython.utils.importstring import import_item |
|
40 | 40 | from IPython.testing.plugin.ipdoctest import IPythonDoctest |
|
41 | 41 | from IPython.external.decorators import KnownFailure, knownfailureif |
|
42 | 42 | |
|
43 | 43 | pjoin = path.join |
|
44 | 44 | |
|
45 | 45 | #----------------------------------------------------------------------------- |
|
46 | 46 | # Warnings control |
|
47 | 47 | #----------------------------------------------------------------------------- |
|
48 | 48 | |
|
49 | 49 | # Twisted generates annoying warnings with Python 2.6, as will do other code |
|
50 | 50 | # that imports 'sets' as of today |
|
51 | 51 | warnings.filterwarnings('ignore', 'the sets module is deprecated', |
|
52 | 52 | DeprecationWarning ) |
|
53 | 53 | |
|
54 | 54 | # This one also comes from Twisted |
|
55 | 55 | warnings.filterwarnings('ignore', 'the sha module is deprecated', |
|
56 | 56 | DeprecationWarning) |
|
57 | 57 | |
|
58 | 58 | # Wx on Fedora11 spits these out |
|
59 | 59 | warnings.filterwarnings('ignore', 'wxPython/wxWidgets release number mismatch', |
|
60 | 60 | UserWarning) |
|
61 | 61 | |
|
62 | 62 | # ------------------------------------------------------------------------------ |
|
63 | 63 | # Monkeypatch Xunit to count known failures as skipped. |
|
64 | 64 | # ------------------------------------------------------------------------------ |
|
def monkeypatch_xunit():
    """Patch nose's Xunit plugin so known failures are reported as skips."""
    # Trigger one known-failure to discover the exception class it raises;
    # the decorator's exception type isn't importable directly.
    try:
        knownfailureif(True)(lambda: None)()
    except Exception as exc:
        known_failure_cls = type(exc)

    def patched_addError(self, test, err, capt=None):
        # Downgrade known failures to skips before delegating to the
        # original implementation.
        if issubclass(err[0], known_failure_cls):
            err = (SkipTest,) + err[1:]
        return self.orig_addError(test, err, capt)

    Xunit.orig_addError = Xunit.addError
    Xunit.addError = patched_addError
|
78 | 78 | |
|
79 | 79 | #----------------------------------------------------------------------------- |
|
80 | 80 | # Check which dependencies are installed and greater than minimum version. |
|
81 | 81 | #----------------------------------------------------------------------------- |
|
def extract_version(mod):
    """Default callback for :func:`test_for`: a module's ``__version__``."""
    return getattr(mod, '__version__')
|
84 | 84 | |
|
def test_for(item, min_version=None, callback=extract_version):
    """Test to see if item is importable, and optionally check against a minimum
    version.

    If min_version is given, the default behavior is to check against the
    `__version__` attribute of the item, but specifying `callback` allows you to
    extract the value you are interested in. e.g::

        In [1]: import sys

        In [2]: from IPython.testing.iptest import test_for

        In [3]: test_for('sys', (2,6), callback=lambda sys: sys.version_info)
        Out[3]: True

    """
    try:
        check = import_item(item)
    except (ImportError, RuntimeError):
        # GTK reports RuntimeError if it can't be initialized even if it's
        # importable.
        return False

    if not min_version:
        # plain importability is all that was asked for
        return True
    if callback:
        # extra processing step to get a comparable version value
        check = callback(check)
    return check >= min_version
|
116 | 116 | |
|
# Global dict where we can store information on what we have and what we
# don't have available at test run time.
have = {}

# (key, import target) pairs, checked in this exact order since importing
# a module can have side effects.
_import_checks = [
    ('curses', '_curses'),
    ('matplotlib', 'matplotlib'),
    ('numpy', 'numpy'),
    ('pexpect', 'pexpect'),
    ('pymongo', 'pymongo'),
    ('pygments', 'pygments'),
    ('qt', 'IPython.external.qt'),
    ('sqlite3', 'sqlite3'),
]
for _key, _target in _import_checks:
    have[_key] = test_for(_target)

# tornado must be at least 4.0; version_info is already a comparable tuple
have['tornado'] = test_for('tornado.version_info', (4, 0), callback=None)

for _key, _target in [
    ('jinja2', 'jinja2'),
    ('mistune', 'mistune'),
    ('requests', 'requests'),
    ('sphinx', 'sphinx'),
    ('jsonschema', 'jsonschema'),
    ('terminado', 'terminado'),
]:
    have[_key] = test_for(_target)

# external command-line tools used by the JS test suites
for _cmd in ('casperjs', 'phantomjs', 'slimerjs'):
    have[_cmd] = is_cmd_found(_cmd)

min_zmq = (13,)

have['zmq'] = test_for('zmq.pyzmq_version_info', min_zmq, callback=lambda x: x())

del _import_checks, _key, _target, _cmd
|
143 | 143 | |
|
144 | 144 | #----------------------------------------------------------------------------- |
|
145 | 145 | # Test suite definitions |
|
146 | 146 | #----------------------------------------------------------------------------- |
|
147 | 147 | |
|
# Names of the top-level test groups; each becomes a TestSection below.
test_group_names = [
    'parallel', 'kernel', 'kernel.inprocess', 'config', 'core', 'extensions',
    'lib', 'terminal', 'testing', 'utils', 'nbformat', 'qt', 'html',
    'nbconvert',
]
|
152 | 152 | |
|
class TestSection(object):
    """A named group of test modules, with exclusions and dependencies.

    Attributes
    ----------
    name : str
        The group's name (e.g. 'core').
    includes : list(str)
        Dotted module paths whose tests this section runs.
    excludes : list(str)
        OS-path fragments to exclude from discovery.
    dependencies : list(str)
        Keys into the module-level ``have`` dict that must all be true.
    enabled : bool
        Manual on/off switch for the whole section.
    """

    def __init__(self, name, includes):
        self.name = name
        self.includes = includes
        self.excludes = []
        self.dependencies = []
        self.enabled = True

    def exclude(self, module):
        """Exclude *module* from this section, recorded in path form."""
        # Bare names are relative to the section's first include.
        if not module.startswith('IPython'):
            module = ".".join([self.includes[0], module])
        self.excludes.append(module.replace('.', os.sep))

    def requires(self, *packages):
        """Declare packages that must be available for this section to run."""
        for pkg in packages:
            self.dependencies.append(pkg)

    @property
    def will_run(self):
        """True if enabled and every declared dependency is available."""
        if not self.enabled:
            return False
        return all(have[p] for p in self.dependencies)
|
172 | 172 | |
|
# Some test groups now live in separately-packaged projects; map the group
# name to the shim package that hosts them.
shims = {
    'parallel': 'ipython_parallel',
    'kernel': 'ipython_kernel',
    'kernel.inprocess': 'ipython_kernel.inprocess',
}

# Name -> TestSection (include list starts as the group's package)
test_sections = {}
for _name in test_group_names:
    test_sections[_name] = TestSection(_name, [shims.get(_name, 'IPython.%s' % _name)])
del _name
|
179 | 181 | |
|
180 | 182 | |
|
#-----------------------------------------------------------------------------
# Exclusions and dependencies
#-----------------------------------------------------------------------------

# core:
section = test_sections['core']
if not have['sqlite3']:
    # the history machinery is backed by sqlite
    section.exclude('tests.test_history')
    section.exclude('history')
if not have['matplotlib']:
    section.exclude('pylabtools')
    section.exclude('tests.test_pylabtools')

# lib:
section = test_sections['lib']
if not have['zmq']:
    section.exclude('kernel')
# Excluded unconditionally so the test suite never imports gtk, which
# changes the default encoding and masks some unicode bugs.
section.exclude('inputhookgtk')
# Also unconditional: wx can interfere with Unix signals, and there are
# currently no tests for it anyway.
section.exclude('inputhookwx')
# Testing inputhook will need a lot of thought, to figure out how to have
# tests that don't lock up with the gui event loops in the picture.
section.exclude('inputhook')

# testing:
section = test_sections['testing']
# These have to be skipped on win32 because they use echo, rm, cd, etc.
# See ticket https://github.com/ipython/ipython/issues/87
if sys.platform == 'win32':
    section.exclude('plugin.test_exampleip')
    section.exclude('plugin.dtexample')

# terminal:
if (not have['pexpect']) or (not have['zmq']):
    test_sections['terminal'].exclude('console')

# parallel:
section = test_sections['parallel']
section.requires('zmq')
if not have['pymongo']:
    section.exclude('controller.mongodb')
    section.exclude('tests.test_mongodb')

# kernel:
section = test_sections['kernel']
section.requires('zmq')
# The in-process kernel tests are done in a separate section
section.exclude('inprocess')
# importing gtk sets the default encoding, which we want to avoid
section.exclude('zmq.gui.gtkembed')
section.exclude('zmq.gui.gtk3embed')
if not have['matplotlib']:
    section.exclude('zmq.pylab')

# kernel.inprocess:
test_sections['kernel.inprocess'].requires('zmq')

# extensions:
section = test_sections['extensions']
# This is deprecated in favour of rpy2
section.exclude('rmagic')
# autoreload does some strange stuff, so move it to its own test section
section.exclude('autoreload')
section.exclude('tests.test_autoreload')
test_sections['autoreload'] = TestSection('autoreload',
    ['IPython.extensions.autoreload', 'IPython.extensions.tests.test_autoreload'])
test_group_names.append('autoreload')

# qt:
test_sections['qt'].requires('zmq', 'qt', 'pygments')

# html:
section = test_sections['html']
section.requires('zmq', 'tornado', 'requests', 'sqlite3', 'jsonschema')
# The notebook 'static' directory contains JS, css and other
# files for web serving.  Occasionally projects may put a .py
# file in there (MathJax ships a conf.py), so we might as
# well play it safe and skip the whole thing.
section.exclude('static')
section.exclude('tasks')
if not have['jinja2']:
    section.exclude('notebookapp')
if not have['pygments'] or not have['jinja2']:
    section.exclude('nbconvert')
if not have['terminado']:
    section.exclude('terminal')

# config:
# Config files aren't really importable stand-alone
test_sections['config'].exclude('profile')

# nbconvert:
section = test_sections['nbconvert']
section.requires('pygments', 'jinja2', 'jsonschema', 'mistune')
# Exclude nbconvert directories containing config files used to test.
# Executing the config files with iptest would cause an exception.
section.exclude('tests.files')
section.exclude('exporters.tests.files')
if not have['tornado']:
    section.exclude('nbconvert.post_processors.serve')
    section.exclude('nbconvert.post_processors.tests.test_serve')

# nbformat:
test_sections['nbformat'].requires('jsonschema')
|
288 | 290 | |
|
289 | 291 | #----------------------------------------------------------------------------- |
|
290 | 292 | # Functions and classes |
|
291 | 293 | #----------------------------------------------------------------------------- |
|
292 | 294 | |
|
def check_exclusions_exist():
    """Warn about exclusion patterns that don't match any file on disk.

    Each section's exclude patterns are path fragments relative to the
    directory containing the IPython package; a pattern that matches
    nothing is probably a typo or refers to a removed module.
    """
    from IPython.utils.path import get_ipython_package_dir
    from IPython.utils.warn import warn
    parent = os.path.dirname(get_ipython_package_dir())
    # BUG FIX: iterate the TestSection objects, not the dict keys
    for sec in test_sections.values():
        # BUG FIX: the attribute is `excludes` (see TestSection), not `exclusions`
        for pattern in sec.excludes:
            fullpath = pjoin(parent, pattern)
            # patterns may name a directory, a module file, or a dotted name
            # that globs to one of several extensions
            if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):
                warn("Excluding nonexistent file: %r" % pattern)
|
302 | 304 | |
|
303 | 305 | |
|
class ExclusionPlugin(Plugin):
    """Nose plugin that excludes files and directories by substring match."""
    name = 'exclusions'
    score = 3000  # high score so we run before any other plugin

    def __init__(self, exclude_patterns=None):
        """
        Parameters
        ----------

        exclude_patterns : sequence of strings, optional
            Filenames containing these patterns (as raw strings, not as regular
            expressions) are excluded from the tests.
        """
        if exclude_patterns:
            self.exclude_patterns = exclude_patterns
        else:
            self.exclude_patterns = []
        super(ExclusionPlugin, self).__init__()

    def options(self, parser, env=os.environ):
        Plugin.options(self, parser, env)

    def configure(self, options, config):
        Plugin.configure(self, options, config)
        # nose disables plugins by default; force this one on.
        self.enabled = True

    def _excluded(self, path):
        # substring match against any configured pattern
        return any(pat in path for pat in self.exclude_patterns)

    def wantFile(self, filename):
        """Return False to skip *filename*, or None to let nose decide."""
        return False if self._excluded(filename) else None

    def wantDirectory(self, directory):
        """Return False to skip *directory*, or None to let nose decide."""
        return False if self._excluded(directory) else None
|
343 | 345 | |
|
344 | 346 | |
|
class StreamCapturer(Thread):
    """Background thread that drains a pipe into an in-memory buffer.

    Writers send bytes to ``writefd``; the thread accumulates them in a
    BytesIO guarded by a lock, optionally echoing them to stdout.
    """
    daemon = True  # Don't hang if main thread crashes
    started = False

    def __init__(self, echo=False):
        super(StreamCapturer, self).__init__()
        self.echo = echo
        self.streams = []
        self.buffer = BytesIO()
        self.readfd, self.writefd = os.pipe()
        self.buffer_lock = Lock()
        self.stop = Event()

    def run(self):
        self.started = True

        while not self.stop.is_set():
            data = os.read(self.readfd, 1024)
            with self.buffer_lock:
                self.buffer.write(data)
            if self.echo:
                sys.stdout.write(bytes_to_str(data))

        # loop exited: release both ends of the pipe
        os.close(self.readfd)
        os.close(self.writefd)

    def reset_buffer(self):
        """Discard everything captured so far."""
        with self.buffer_lock:
            self.buffer.truncate(0)
            self.buffer.seek(0)

    def get_buffer(self):
        """Return all bytes captured so far."""
        with self.buffer_lock:
            return self.buffer.getvalue()

    def ensure_started(self):
        # Safe to call repeatedly; a Thread may only be start()ed once.
        if not self.started:
            self.start()

    def halt(self):
        """Safely stop the thread."""
        if not self.started:
            return
        self.stop.set()
        os.write(self.writefd, b'\0')  # Ensure we're not locked in a read()
        self.join()
|
392 | 394 | |
|
class SubprocessStreamCapturePlugin(Plugin):
    """Nose plugin that captures stdout/stderr of spawned subprocesses.

    Captured output is appended to failure/error reports so subprocess
    noise shows up next to the test that produced it.
    """
    name='subprocstreams'

    def __init__(self):
        Plugin.__init__(self)
        self.stream_capturer = StreamCapturer()
        # 'capture' (default), 'discard', or anything else -> real stdout
        self.destination = os.environ.get('IPTEST_SUBPROC_STREAMS', 'capture')
        # This is ugly, but distant parts of the test machinery need to be able
        # to redirect streams, so we make the object globally accessible.
        nose.iptest_stdstreams_fileno = self.get_write_fileno

    def get_write_fileno(self):
        """Return the fd that subprocess output should be written to."""
        dest = self.destination
        if dest == 'capture':
            self.stream_capturer.ensure_started()
            return self.stream_capturer.writefd
        if dest == 'discard':
            return os.open(os.devnull, os.O_WRONLY)
        return sys.__stdout__.fileno()

    def configure(self, options, config):
        Plugin.configure(self, options, config)
        # Override nose trying to disable plugin.
        if self.destination == 'capture':
            self.enabled = True

    def startTest(self, test):
        # Start each test with an empty capture buffer.
        self.stream_capturer.reset_buffer()

    def formatFailure(self, test, err):
        """Append any captured subprocess output to the failure report."""
        exc_class, exc_value, tb = err
        captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')
        if not captured.strip():
            return err
        pieces = [safe_str(exc_value),
                  '>> begin captured subprocess output <<',
                  captured,
                  '>> end captured subprocess output <<']
        return exc_class, '\n'.join(pieces), tb

    formatError = formatFailure

    def finalize(self, result):
        self.stream_capturer.halt()
|
439 | 441 | |
|
440 | 442 | |
|
def run_iptest():
    """Run the IPython test suite using nose.

    This function is called when this script is **not** called with the form
    `iptest all`. It simply calls nose with appropriate command line flags
    and accepts all of the standard nose arguments.
    """
    # Count known failures as skips in xunit reports (idempotent).
    if '--with-xunit' in sys.argv and not hasattr(Xunit, 'orig_addError'):
        monkeypatch_xunit()

    warnings.filterwarnings('ignore',
        'This will be removed soon. Use IPython.testing.util instead')

    # Resolve the requested section: either a known group name (optionally
    # written with an 'IPython.' prefix) or an arbitrary module path.
    arg1 = sys.argv[1]
    if arg1 in test_sections:
        key = arg1
    elif arg1.startswith('IPython.') and arg1[8:] in test_sections:
        key = arg1[8:]
    else:
        key = None

    if key is None:
        section = TestSection(arg1, includes=[arg1])
    else:
        section = test_sections[key]
        sys.argv[1:2] = section.includes

    extra_args = [
        '--detailed-errors',  # extra info in tracebacks

        '--with-ipdoctest',
        '--ipdoctest-tests', '--ipdoctest-extension=txt',

        # We add --exe because of setuptools' imbecility (it
        # blindly does chmod +x on ALL files). Nose does the
        # right thing and it tries to avoid executables,
        # setuptools unfortunately forces our hand here. This
        # has been discussed on the distutils list and the
        # setuptools devs refuse to fix this problem!
        '--exe',
    ]
    argv = sys.argv + extra_args
    if '-a' not in argv and '-A' not in argv:
        argv = argv + ['-a', '!crash']

    if nose.__version__ >= '0.11':
        # I don't fully understand why we need this one, but depending on what
        # directory the test suite is run from, if we don't give it, 0 tests
        # get run. Specifically, if the test suite is run from the source dir
        # with an argument (like 'iptest.py IPython.core', 0 tests are run,
        # even if the same call done in this directory works fine). It appears
        # that if the requested package is in the current dir, nose bails early
        # by default. Since it's otherwise harmless, leave it in by default
        # for nose >= 0.11, though unfortunately nose 0.10 doesn't support it.
        argv.append('--traverse-namespace')

    # Use our plugin for doctesting. It will remove the standard doctest
    # plugin if it finds it enabled.
    plugins = [
        ExclusionPlugin(section.excludes),
        IPythonDoctest(),
        KnownFailure(),
        SubprocessStreamCapturePlugin(),
    ]

    # Use working directory set by parent process (see iptestcontroller)
    if 'IPTEST_WORKING_DIR' in os.environ:
        os.chdir(os.environ['IPTEST_WORKING_DIR'])

    # We need a global ipython running in this process, but the special
    # in-process group spawns its own IPython kernels, so for *that* group we
    # must avoid also opening the global one (otherwise there's a conflict of
    # singletons). Ultimately the solution to this problem is to refactor our
    # assumptions about what needs to be a singleton and what doesn't (app
    # objects should, individual shells shouldn't). But for now, this
    # workaround allows the test suite for the inprocess module to complete.
    if 'kernel.inprocess' not in section.name:
        from IPython.testing import globalipapp
        globalipapp.start_ipython()

    # Now nose can run
    TestProgram(argv=argv, addplugins=plugins)
|
515 | 517 | |
|
# Script entry point: run the suite with nose when executed directly.
if __name__ == '__main__':
    run_iptest()
|
518 | 520 |
@@ -1,226 +1,226 b'' | |||
|
1 | 1 | """Utilities for launching kernels |
|
2 | 2 | """ |
|
3 | 3 | |
|
4 | 4 | # Copyright (c) IPython Development Team. |
|
5 | 5 | # Distributed under the terms of the Modified BSD License. |
|
6 | 6 | |
|
7 | 7 | import os |
|
8 | 8 | import sys |
|
9 | 9 | from subprocess import Popen, PIPE |
|
10 | 10 | |
|
11 | 11 | from IPython.utils.encoding import getdefaultencoding |
|
12 | 12 | from IPython.utils.py3compat import cast_bytes_py2 |
|
13 | 13 | |
|
14 | 14 | |
|
def swallow_argv(argv, aliases=None, flags=None):
    """strip frontend-specific aliases and flags from an argument list

    For use primarily in frontend apps that want to pass a subset of command-line
    arguments through to a subprocess, where frontend-specific flags and aliases
    should be removed from the list.

    Parameters
    ----------

    argv : list(str)
        The starting argv, to be filtered
    aliases : container of aliases (dict, list, set, etc.)
        The frontend-specific aliases to be removed
    flags : container of flags (dict, list, set, etc.)
        The frontend-specific flags to be removed

    Returns
    -------

    argv : list(str)
        The argv list, excluding flags and aliases that have been stripped
    """
    aliases = set() if aliases is None else aliases
    flags = set() if flags is None else flags

    remaining = list(argv)  # work on a copy; argv itself is untouched

    eat_next = False
    prev_was_flag = False
    for arg in argv:
        if arg == '--':
            # everything after a bare '--' passes through untouched
            break
        if eat_next:
            eat_next = False
            # the previous arg was an alias expecting a value: drop this arg
            # *unless* that alias also has a no-arg flag form and this arg
            # looks like another flag
            if not (prev_was_flag and arg.startswith('-')):
                remaining.remove(arg)
                continue
        if not arg.startswith('-'):
            continue
        parts = arg.lstrip('-').split('=')
        name = parts[0]
        # startswith: argparse accepts any unique leading section of an
        # option name, so `--no-br` means `--no-browser` in the notebook, etc.
        if any(alias.startswith(name) for alias in aliases):
            remaining.remove(arg)
            if len(parts) == 1:
                # value given via space: remember to drop it next iteration
                eat_next = True
                # the name may also be a flag (e.g. `existing`), in which
                # case the next arg might not be its value
                prev_was_flag = name in flags
        elif len(parts) == 1 and any(flag.startswith(name) for flag in flags):
            # a bare flag: strip it, but don't eat the next arg —
            # flags take no argument
            remaining.remove(arg)

    # return shortened list
    return remaining
|
79 | 79 | |
|
80 | 80 | |
|
def make_ipkernel_cmd(mod='ipython_kernel', executable=None, extra_arguments=None, **kw):
    """Build Popen command list for launching an IPython kernel.

    Parameters
    ----------
    mod : str, optional (default 'ipython_kernel')
        A string of an IPython module whose __main__ starts an IPython kernel

    executable : str, optional (default sys.executable)
        The Python executable to use for the kernel process.

    extra_arguments : list, optional
        A list of extra arguments to pass when executing the launch code.
        (Replaces the former mutable default ``[]``; ``None`` means none.)

    **kw : ignored
        Accepted for signature compatibility with callers that pass
        launch-related keywords through.

    Returns
    -------

    A Popen command list
    """
    if executable is None:
        executable = sys.executable
    # '-f {connection_file}' is filled in later by the kernel manager.
    arguments = [ executable, '-m', mod, '-f', '{connection_file}' ]
    if extra_arguments:
        arguments.extend(extra_arguments)

    return arguments
|
106 | 106 | |
|
107 | 107 | |
|
def launch_kernel(cmd, stdin=None, stdout=None, stderr=None, env=None,
                  independent=False,
                  cwd=None,
                  **kw
                  ):
    """ Launches a localhost kernel, binding to the specified ports.

    Parameters
    ----------
    cmd : Popen list,
        A string of Python code that imports and executes a kernel entry point.

    stdin, stdout, stderr : optional (default None)
        Standards streams, as defined in subprocess.Popen.

    env : dict, optional
        Environment for the kernel process; defaults to a copy of this
        process's environment.

    independent : bool, optional (default False)
        If set, the kernel process is guaranteed to survive if this process
        dies. If not set, an effort is made to ensure that the kernel is killed
        when this process dies. Note that in this case it is still good practice
        to kill kernels manually before exiting.

    cwd : path, optional
        The working dir of the kernel process (default: cwd of this process).

    Returns
    -------

    Popen instance for the kernel subprocess
    """

    # Popen will fail (sometimes with a deadlock) if stdin, stdout, and stderr
    # are invalid. Unfortunately, there is in general no way to detect whether
    # they are valid. The following two blocks redirect them to (temporary)
    # pipes in certain important cases.

    # If this process has been backgrounded, our stdin is invalid. Since there
    # is no compelling reason for the kernel to inherit our stdin anyway, we
    # play it safe and always redirect.
    redirect_in = True
    _stdin = PIPE if stdin is None else stdin

    # If this process is running on pythonw, we know that stdin, stdout, and
    # stderr are all invalid.
    redirect_out = sys.executable.endswith('pythonw.exe')
    if redirect_out:
        blackhole = open(os.devnull, 'w')
        _stdout = blackhole if stdout is None else stdout
        _stderr = blackhole if stderr is None else stderr
    else:
        _stdout, _stderr = stdout, stderr

    env = env if (env is not None) else os.environ.copy()

    encoding = getdefaultencoding(prefer_stream=False)
    kwargs = dict(
        stdin=_stdin,
        stdout=_stdout,
        stderr=_stderr,
        cwd=cwd,
        env=env,
    )

    # Spawn a kernel.
    if sys.platform == 'win32':
        # Popen on Python 2 on Windows cannot handle unicode args or cwd
        cmd = [ cast_bytes_py2(c, encoding) for c in cmd ]
        if cwd:
            cwd = cast_bytes_py2(cwd, sys.getfilesystemencoding() or 'ascii')
            kwargs['cwd'] = cwd

        from jupyter_client.parentpoller import ParentPollerWindows
        # Create a Win32 event for interrupting the kernel
        # and store it in an environment variable.
        interrupt_event = ParentPollerWindows.create_interrupt_event()
        env["JPY_INTERRUPT_EVENT"] = str(interrupt_event)
        # deprecated old env name:
        env["IPY_INTERRUPT_EVENT"] = env["JPY_INTERRUPT_EVENT"]

        try:
            # Python 3 moved these into _winapi; Python 2 had _subprocess.
            from _winapi import DuplicateHandle, GetCurrentProcess, \
                DUPLICATE_SAME_ACCESS, CREATE_NEW_PROCESS_GROUP
        except ImportError:
            # Narrowed from a bare `except:`, which would also have swallowed
            # KeyboardInterrupt/SystemExit.
            from _subprocess import DuplicateHandle, GetCurrentProcess, \
                DUPLICATE_SAME_ACCESS, CREATE_NEW_PROCESS_GROUP
        # Launch the kernel process
        if independent:
            kwargs['creationflags'] = CREATE_NEW_PROCESS_GROUP
        else:
            # Hand the kernel an inheritable handle to this process so it can
            # detect our death and exit with us.
            pid = GetCurrentProcess()
            handle = DuplicateHandle(pid, pid, pid, 0,
                                     True, # Inheritable by new processes.
                                     DUPLICATE_SAME_ACCESS)
            env['JPY_PARENT_PID'] = str(int(handle))

        proc = Popen(cmd, **kwargs)

        # Attach the interrupt event to the Popen object so it can be used later.
        proc.win32_interrupt_event = interrupt_event

    else:
        if independent:
            # Start a new session so the kernel survives the death of this
            # process group.
            kwargs['preexec_fn'] = lambda: os.setsid()
        else:
            env['JPY_PARENT_PID'] = str(os.getpid())

        proc = Popen(cmd, **kwargs)

    # Clean up pipes created to work around Popen bug.
    if redirect_in:
        if stdin is None:
            proc.stdin.close()

    return proc
|
221 | 221 | |
|
# Explicit public API of this module, consumed by `from ... import *`
# and documentation tools.
__all__ = [
    'swallow_argv',
    'make_ipkernel_cmd',
    'launch_kernel',
]
General Comments 0
You need to be logged in to leave comments.
Login now