Show More
@@ -0,0 +1,268 b'' | |||
|
1 | """Simple utility for building a list of local IPs using the socket module. | |
|
2 | This module defines two constants: | |
|
3 | ||
|
4 | LOCALHOST : The loopback interface, or the first interface that points to this | |
|
5 | machine. It will *almost* always be '127.0.0.1' | |
|
6 | ||
|
7 | LOCAL_IPS : A list of IP addresses, loopback first, that point to this machine. | |
|
8 | This will include LOCALHOST, PUBLIC_IPS, and aliases for all hosts, | |
|
9 | such as '0.0.0.0'. | |
|
10 | ||
|
11 | PUBLIC_IPS : A list of public IP addresses that point to this machine. | |
|
12 | Use these to tell remote clients where to find you. | |
|
13 | """ | |
|
14 | ||
|
15 | # Copyright (c) IPython Development Team. | |
|
16 | # Distributed under the terms of the Modified BSD License. | |
|
17 | ||
|
18 | import os | |
|
19 | import re | |
|
20 | import socket | |
|
21 | ||
|
22 | from IPython.utils.data import uniq_stable | |
|
23 | from IPython.utils.process import get_output_error_code | |
|
24 | from warnings import warn | |
|
25 | ||
|
26 | ||
|
27 | LOCAL_IPS = [] | |
|
28 | PUBLIC_IPS = [] | |
|
29 | ||
|
30 | LOCALHOST = '' | |
|
31 | ||
|
32 | def _only_once(f): | |
|
33 | """decorator to only run a function once""" | |
|
34 | f.called = False | |
|
35 | def wrapped(**kwargs): | |
|
36 | if f.called: | |
|
37 | return | |
|
38 | ret = f(**kwargs) | |
|
39 | f.called = True | |
|
40 | return ret | |
|
41 | return wrapped | |
|
42 | ||
|
def _requires_ips(f):
    """decorator to ensure load_ips has been run before f

    Wraps ``f`` so that the module-level IP tables (LOCALHOST, LOCAL_IPS,
    PUBLIC_IPS) are populated before ``f`` executes.  ``_load_ips`` is
    guarded by ``_only_once``, so the repeated call is cheap after the
    first invocation.
    """
    def ips_loaded(*args, **kwargs):
        # idempotent after the first successful run
        _load_ips()
        return f(*args, **kwargs)
    return ips_loaded
|
49 | ||
|
50 | # subprocess-parsing ip finders | |
|
class NoIPAddresses(Exception):
    """Raised when an interface-listing command yielded no usable IP addresses."""
    pass
|
53 | ||
|
def _populate_from_list(addrs):
    """Fill LOCAL_IPS / PUBLIC_IPS (and LOCALHOST) from a flat address list.

    Raises NoIPAddresses when ``addrs`` is empty.
    """
    if not addrs:
        raise NoIPAddresses

    global LOCALHOST
    local, public = [], []
    for address in addrs:
        local.append(address)
        if address.startswith('127.'):
            # the first loopback address we see becomes LOCALHOST
            if not LOCALHOST:
                LOCALHOST = address
        else:
            public.append(address)

    if not LOCALHOST:
        # no loopback address at all; fall back to the conventional default
        LOCALHOST = '127.0.0.1'
        local.insert(0, LOCALHOST)

    # aliases meaning "any interface" / "this host"
    local.extend(['0.0.0.0', ''])

    LOCAL_IPS[:] = uniq_stable(local)
    PUBLIC_IPS[:] = uniq_stable(public)
|
78 | ||
|
def _load_ips_ifconfig():
    """Load IP addresses by parsing `ifconfig` output (posix)."""
    out, err, rc = get_output_error_code('ifconfig')
    if rc:
        # ifconfig commonly lives in /sbin, which is not on everyone's PATH
        out, err, rc = get_output_error_code('/sbin/ifconfig')
        if rc:
            raise IOError("no ifconfig: %s" % err)

    addrs = []
    for line in out.splitlines():
        fields = line.lower().split()
        if len(fields) < 2 or fields[0] != 'inet':
            continue
        token = fields[1]
        # Linux ifconfig prints "inet addr:1.2.3.4"; BSD prints "inet 1.2.3.4"
        if token.startswith("addr:"):
            addrs.append(token.split(":")[1])
        else:
            addrs.append(token)
    _populate_from_list(addrs)
|
99 | ||
|
100 | ||
|
def _load_ips_ip():
    """Load IP addresses by parsing `ip addr` output (Linux)."""
    out, err, rc = get_output_error_code('ip addr')
    if rc:
        raise IOError("no ip: %s" % err)

    addrs = []
    for line in out.splitlines():
        fields = line.lower().split()
        # address lines look like: "inet 192.168.1.5/24 brd ..."
        if len(fields) >= 2 and fields[0] == 'inet':
            addrs.append(fields[1].split('/')[0])
    _populate_from_list(addrs)
|
114 | ||
|
# Matches lines like "IPv4 Address. . . : 192.168.1.5" and captures the address.
_ipconfig_ipv4_pat = re.compile(r'ipv4.*?(\d+\.\d+\.\d+\.\d+)$', re.IGNORECASE)

def _load_ips_ipconfig():
    """Load IP addresses by parsing `ipconfig` output (Windows)."""
    out, err, rc = get_output_error_code('ipconfig')
    if rc:
        raise IOError("no ipconfig: %s" % err)

    matches = (_ipconfig_ipv4_pat.match(line.strip()) for line in out.splitlines())
    addrs = [m.group(1) for m in matches if m]
    _populate_from_list(addrs)
|
130 | ||
|
131 | ||
|
def _load_ips_netifaces():
    """load ip addresses with netifaces

    Populates LOCALHOST, LOCAL_IPS and PUBLIC_IPS from the third-party
    netifaces package.  Raises ImportError when netifaces is not
    installed (the caller treats that as "try the next strategy").
    """
    import netifaces
    global LOCALHOST
    local_ips = []
    public_ips = []

    # list of iface names, 'lo0', 'eth0', etc.
    for iface in netifaces.interfaces():
        # list of ipv4 addrinfo dicts
        # (presumably each dict has an 'addr' key per the netifaces API — confirm)
        ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])
        for entry in ipv4s:
            addr = entry.get('addr')
            if not addr:
                continue
            # public = not a loopback interface name AND not a 127.* address
            if not (iface.startswith('lo') or addr.startswith('127.')):
                public_ips.append(addr)
            elif not LOCALHOST:
                # first loopback address seen becomes LOCALHOST
                LOCALHOST = addr
            local_ips.append(addr)
    if not LOCALHOST:
        # we never found a loopback interface (can this ever happen?), assume common default
        LOCALHOST = '127.0.0.1'
        local_ips.insert(0, LOCALHOST)
    # aliases meaning "any interface" / "this host"
    local_ips.extend(['0.0.0.0', ''])
    LOCAL_IPS[:] = uniq_stable(local_ips)
    PUBLIC_IPS[:] = uniq_stable(public_ips)
|
159 | ||
|
160 | ||
|
def _load_ips_gethostbyname():
    """load ip addresses with socket.gethostbyname_ex

    This can be slow.

    Last-resort strategy: resolves 'localhost' for loopback addresses and
    the machine's hostname for public ones, merging both into LOCAL_IPS.
    """
    global LOCALHOST
    try:
        # loopback addresses first, so LOCAL_IPS[0] ends up being loopback
        LOCAL_IPS[:] = socket.gethostbyname_ex('localhost')[2]
    except socket.error:
        # assume common default
        LOCAL_IPS[:] = ['127.0.0.1']

    try:
        hostname = socket.gethostname()
        PUBLIC_IPS[:] = socket.gethostbyname_ex(hostname)[2]
        # try hostname.local, in case hostname has been short-circuited to loopback
        if not hostname.endswith('.local') and all(ip.startswith('127') for ip in PUBLIC_IPS):
            PUBLIC_IPS[:] = socket.gethostbyname_ex(socket.gethostname() + '.local')[2]
    except socket.error:
        pass
    finally:
        # runs whether or not resolution succeeded: dedupe whatever we got
        # and merge the public addresses into the local list
        PUBLIC_IPS[:] = uniq_stable(PUBLIC_IPS)
        LOCAL_IPS.extend(PUBLIC_IPS)

    # include all-interface aliases: 0.0.0.0 and ''
    LOCAL_IPS.extend(['0.0.0.0', ''])

    LOCAL_IPS[:] = uniq_stable(LOCAL_IPS)

    # loopback was inserted first above, so index 0 is the loopback address
    LOCALHOST = LOCAL_IPS[0]
|
191 | ||
|
def _load_ips_dumb():
    """Last-resort defaults when every discovery mechanism failed."""
    global LOCALHOST
    LOCALHOST = '127.0.0.1'
    # rewrite both module-level lists in place so existing references see the update
    del LOCAL_IPS[:]
    LOCAL_IPS.extend([LOCALHOST, '0.0.0.0', ''])
    del PUBLIC_IPS[:]
|
198 | ||
|
@_only_once
def _load_ips(suppress_exceptions=True):
    """Populate LOCALHOST / LOCAL_IPS / PUBLIC_IPS for this machine.

    Runs at most once (see ``_only_once``).  Discovery strategies, in
    order of preference:

    1. netifaces, when importable (fast)
    2. parsing platform tools: ipconfig on Windows, else ifconfig then
       `ip addr`
    3. socket.gethostbyname_ex, which can be slow

    Any unexpected failure falls back to dumb defaults (and warns) unless
    ``suppress_exceptions`` is False, in which case it is re-raised.
    """
    try:
        # 1. netifaces, if installed
        try:
            return _load_ips_netifaces()
        except ImportError:
            pass

        # 2. parse subprocess output (how reliable is this?)
        if os.name == 'nt':
            try:
                return _load_ips_ipconfig()
            except (IOError, NoIPAddresses):
                pass
        else:
            try:
                return _load_ips_ifconfig()
            except (IOError, NoIPAddresses):
                pass
            try:
                return _load_ips_ip()
            except (IOError, NoIPAddresses):
                pass

        # 3. last resort: gethostbyname
        return _load_ips_gethostbyname()
    except Exception as exc:
        if not suppress_exceptions:
            raise
        # unexpected error shouldn't crash, load dumb default values instead.
        warn("Unexpected error discovering local network interfaces: %s" % exc)
        _load_ips_dumb()
|
243 | ||
|
244 | ||
|
@_requires_ips
def local_ips():
    """Return every IP address that points to this machine (loopback first)."""
    return LOCAL_IPS
|
249 | ||
|
@_requires_ips
def public_ips():
    """Return this machine's addresses that other hosts can use to reach it."""
    return PUBLIC_IPS
|
254 | ||
|
@_requires_ips
def localhost():
    """Return the loopback IP for this machine (almost always 127.0.0.1)."""
    return LOCALHOST
|
259 | ||
|
@_requires_ips
def is_local_ip(ip):
    """Return True when `ip` points to this machine."""
    return ip in LOCAL_IPS
|
264 | ||
|
@_requires_ips
def is_public_ip(ip):
    """Return True when `ip` is an address other machines can see."""
    return ip in PUBLIC_IPS
@@ -1,278 +1,5 b'' | |||
|
1 | """Simple utility for building a list of local IPs using the socket module. | |
|
2 | This module defines two constants: | |
|
1 | from warnings import warn | |
|
3 | 2 | |
|
4 | LOCALHOST : The loopback interface, or the first interface that points to this | |
|
5 | machine. It will *almost* always be '127.0.0.1' | |
|
3 | warn("IPython.utils.localinterfaces has moved to jupyter_client.localinterfaces") | |
|
6 | 4 | |
|
7 | LOCAL_IPS : A list of IP addresses, loopback first, that point to this machine. | |
|
8 | This will include LOCALHOST, PUBLIC_IPS, and aliases for all hosts, | |
|
9 | such as '0.0.0.0'. | |
|
10 | ||
|
11 | PUBLIC_IPS : A list of public IP addresses that point to this machine. | |
|
12 | Use these to tell remote clients where to find you. | |
|
13 | """ | |
|
14 | #----------------------------------------------------------------------------- | |
|
15 | # Copyright (C) 2010 The IPython Development Team | |
|
16 | # | |
|
17 | # Distributed under the terms of the BSD License. The full license is in | |
|
18 | # the file COPYING, distributed as part of this software. | |
|
19 | #----------------------------------------------------------------------------- | |
|
20 | ||
|
21 | #----------------------------------------------------------------------------- | |
|
22 | # Imports | |
|
23 | #----------------------------------------------------------------------------- | |
|
24 | ||
|
25 | import os | |
|
26 | import re | |
|
27 | import socket | |
|
28 | ||
|
29 | from .data import uniq_stable | |
|
30 | from .process import get_output_error_code | |
|
31 | from .warn import warn | |
|
32 | ||
|
33 | #----------------------------------------------------------------------------- | |
|
34 | # Code | |
|
35 | #----------------------------------------------------------------------------- | |
|
36 | ||
|
37 | LOCAL_IPS = [] | |
|
38 | PUBLIC_IPS = [] | |
|
39 | ||
|
40 | LOCALHOST = '' | |
|
41 | ||
|
42 | def _only_once(f): | |
|
43 | """decorator to only run a function once""" | |
|
44 | f.called = False | |
|
45 | def wrapped(**kwargs): | |
|
46 | if f.called: | |
|
47 | return | |
|
48 | ret = f(**kwargs) | |
|
49 | f.called = True | |
|
50 | return ret | |
|
51 | return wrapped | |
|
52 | ||
|
53 | def _requires_ips(f): | |
|
54 | """decorator to ensure load_ips has been run before f""" | |
|
55 | def ips_loaded(*args, **kwargs): | |
|
56 | _load_ips() | |
|
57 | return f(*args, **kwargs) | |
|
58 | return ips_loaded | |
|
59 | ||
|
60 | # subprocess-parsing ip finders | |
|
61 | class NoIPAddresses(Exception): | |
|
62 | pass | |
|
63 | ||
|
64 | def _populate_from_list(addrs): | |
|
65 | """populate local and public IPs from flat list of all IPs""" | |
|
66 | if not addrs: | |
|
67 | raise NoIPAddresses | |
|
68 | ||
|
69 | global LOCALHOST | |
|
70 | public_ips = [] | |
|
71 | local_ips = [] | |
|
72 | ||
|
73 | for ip in addrs: | |
|
74 | local_ips.append(ip) | |
|
75 | if not ip.startswith('127.'): | |
|
76 | public_ips.append(ip) | |
|
77 | elif not LOCALHOST: | |
|
78 | LOCALHOST = ip | |
|
79 | ||
|
80 | if not LOCALHOST: | |
|
81 | LOCALHOST = '127.0.0.1' | |
|
82 | local_ips.insert(0, LOCALHOST) | |
|
83 | ||
|
84 | local_ips.extend(['0.0.0.0', '']) | |
|
85 | ||
|
86 | LOCAL_IPS[:] = uniq_stable(local_ips) | |
|
87 | PUBLIC_IPS[:] = uniq_stable(public_ips) | |
|
88 | ||
|
89 | def _load_ips_ifconfig(): | |
|
90 | """load ip addresses from `ifconfig` output (posix)""" | |
|
91 | ||
|
92 | out, err, rc = get_output_error_code('ifconfig') | |
|
93 | if rc: | |
|
94 | # no ifconfig, it's usually in /sbin and /sbin is not on everyone's PATH | |
|
95 | out, err, rc = get_output_error_code('/sbin/ifconfig') | |
|
96 | if rc: | |
|
97 | raise IOError("no ifconfig: %s" % err) | |
|
98 | ||
|
99 | lines = out.splitlines() | |
|
100 | addrs = [] | |
|
101 | for line in lines: | |
|
102 | blocks = line.lower().split() | |
|
103 | if (len(blocks) >= 2) and (blocks[0] == 'inet'): | |
|
104 | if blocks[1].startswith("addr:"): | |
|
105 | addrs.append(blocks[1].split(":")[1]) | |
|
106 | else: | |
|
107 | addrs.append(blocks[1]) | |
|
108 | _populate_from_list(addrs) | |
|
109 | ||
|
110 | ||
|
111 | def _load_ips_ip(): | |
|
112 | """load ip addresses from `ip addr` output (Linux)""" | |
|
113 | out, err, rc = get_output_error_code('ip addr') | |
|
114 | if rc: | |
|
115 | raise IOError("no ip: %s" % err) | |
|
116 | ||
|
117 | lines = out.splitlines() | |
|
118 | addrs = [] | |
|
119 | for line in lines: | |
|
120 | blocks = line.lower().split() | |
|
121 | if (len(blocks) >= 2) and (blocks[0] == 'inet'): | |
|
122 | addrs.append(blocks[1].split('/')[0]) | |
|
123 | _populate_from_list(addrs) | |
|
124 | ||
|
125 | _ipconfig_ipv4_pat = re.compile(r'ipv4.*?(\d+\.\d+\.\d+\.\d+)$', re.IGNORECASE) | |
|
126 | ||
|
127 | def _load_ips_ipconfig(): | |
|
128 | """load ip addresses from `ipconfig` output (Windows)""" | |
|
129 | out, err, rc = get_output_error_code('ipconfig') | |
|
130 | if rc: | |
|
131 | raise IOError("no ipconfig: %s" % err) | |
|
132 | ||
|
133 | lines = out.splitlines() | |
|
134 | addrs = [] | |
|
135 | for line in lines: | |
|
136 | m = _ipconfig_ipv4_pat.match(line.strip()) | |
|
137 | if m: | |
|
138 | addrs.append(m.group(1)) | |
|
139 | _populate_from_list(addrs) | |
|
140 | ||
|
141 | ||
|
142 | def _load_ips_netifaces(): | |
|
143 | """load ip addresses with netifaces""" | |
|
144 | import netifaces | |
|
145 | global LOCALHOST | |
|
146 | local_ips = [] | |
|
147 | public_ips = [] | |
|
148 | ||
|
149 | # list of iface names, 'lo0', 'eth0', etc. | |
|
150 | for iface in netifaces.interfaces(): | |
|
151 | # list of ipv4 addrinfo dicts | |
|
152 | ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, []) | |
|
153 | for entry in ipv4s: | |
|
154 | addr = entry.get('addr') | |
|
155 | if not addr: | |
|
156 | continue | |
|
157 | if not (iface.startswith('lo') or addr.startswith('127.')): | |
|
158 | public_ips.append(addr) | |
|
159 | elif not LOCALHOST: | |
|
160 | LOCALHOST = addr | |
|
161 | local_ips.append(addr) | |
|
162 | if not LOCALHOST: | |
|
163 | # we never found a loopback interface (can this ever happen?), assume common default | |
|
164 | LOCALHOST = '127.0.0.1' | |
|
165 | local_ips.insert(0, LOCALHOST) | |
|
166 | local_ips.extend(['0.0.0.0', '']) | |
|
167 | LOCAL_IPS[:] = uniq_stable(local_ips) | |
|
168 | PUBLIC_IPS[:] = uniq_stable(public_ips) | |
|
169 | ||
|
170 | ||
|
171 | def _load_ips_gethostbyname(): | |
|
172 | """load ip addresses with socket.gethostbyname_ex | |
|
173 | ||
|
174 | This can be slow. | |
|
175 | """ | |
|
176 | global LOCALHOST | |
|
177 | try: | |
|
178 | LOCAL_IPS[:] = socket.gethostbyname_ex('localhost')[2] | |
|
179 | except socket.error: | |
|
180 | # assume common default | |
|
181 | LOCAL_IPS[:] = ['127.0.0.1'] | |
|
182 | ||
|
183 | try: | |
|
184 | hostname = socket.gethostname() | |
|
185 | PUBLIC_IPS[:] = socket.gethostbyname_ex(hostname)[2] | |
|
186 | # try hostname.local, in case hostname has been short-circuited to loopback | |
|
187 | if not hostname.endswith('.local') and all(ip.startswith('127') for ip in PUBLIC_IPS): | |
|
188 | PUBLIC_IPS[:] = socket.gethostbyname_ex(socket.gethostname() + '.local')[2] | |
|
189 | except socket.error: | |
|
190 | pass | |
|
191 | finally: | |
|
192 | PUBLIC_IPS[:] = uniq_stable(PUBLIC_IPS) | |
|
193 | LOCAL_IPS.extend(PUBLIC_IPS) | |
|
194 | ||
|
195 | # include all-interface aliases: 0.0.0.0 and '' | |
|
196 | LOCAL_IPS.extend(['0.0.0.0', '']) | |
|
197 | ||
|
198 | LOCAL_IPS[:] = uniq_stable(LOCAL_IPS) | |
|
199 | ||
|
200 | LOCALHOST = LOCAL_IPS[0] | |
|
201 | ||
|
202 | def _load_ips_dumb(): | |
|
203 | """Fallback in case of unexpected failure""" | |
|
204 | global LOCALHOST | |
|
205 | LOCALHOST = '127.0.0.1' | |
|
206 | LOCAL_IPS[:] = [LOCALHOST, '0.0.0.0', ''] | |
|
207 | PUBLIC_IPS[:] = [] | |
|
208 | ||
|
209 | @_only_once | |
|
210 | def _load_ips(suppress_exceptions=True): | |
|
211 | """load the IPs that point to this machine | |
|
212 | ||
|
213 | This function will only ever be called once. | |
|
214 | ||
|
215 | It will use netifaces to do it quickly if available. | |
|
216 | Then it will fallback on parsing the output of ifconfig / ip addr / ipconfig, as appropriate. | |
|
217 | Finally, it will fallback on socket.gethostbyname_ex, which can be slow. | |
|
218 | """ | |
|
219 | ||
|
220 | try: | |
|
221 | # first priority, use netifaces | |
|
222 | try: | |
|
223 | return _load_ips_netifaces() | |
|
224 | except ImportError: | |
|
225 | pass | |
|
226 | ||
|
227 | # second priority, parse subprocess output (how reliable is this?) | |
|
228 | ||
|
229 | if os.name == 'nt': | |
|
230 | try: | |
|
231 | return _load_ips_ipconfig() | |
|
232 | except (IOError, NoIPAddresses): | |
|
233 | pass | |
|
234 | else: | |
|
235 | try: | |
|
236 | return _load_ips_ifconfig() | |
|
237 | except (IOError, NoIPAddresses): | |
|
238 | pass | |
|
239 | try: | |
|
240 | return _load_ips_ip() | |
|
241 | except (IOError, NoIPAddresses): | |
|
242 | pass | |
|
243 | ||
|
244 | # lowest priority, use gethostbyname | |
|
245 | ||
|
246 | return _load_ips_gethostbyname() | |
|
247 | except Exception as e: | |
|
248 | if not suppress_exceptions: | |
|
249 | raise | |
|
250 | # unexpected error shouldn't crash, load dumb default values instead. | |
|
251 | warn("Unexpected error discovering local network interfaces: %s" % e) | |
|
252 | _load_ips_dumb() | |
|
253 | ||
|
254 | ||
|
255 | @_requires_ips | |
|
256 | def local_ips(): | |
|
257 | """return the IP addresses that point to this machine""" | |
|
258 | return LOCAL_IPS | |
|
259 | ||
|
260 | @_requires_ips | |
|
261 | def public_ips(): | |
|
262 | """return the IP addresses for this machine that are visible to other machines""" | |
|
263 | return PUBLIC_IPS | |
|
264 | ||
|
265 | @_requires_ips | |
|
266 | def localhost(): | |
|
267 | """return ip for localhost (almost always 127.0.0.1)""" | |
|
268 | return LOCALHOST | |
|
269 | ||
|
270 | @_requires_ips | |
|
271 | def is_local_ip(ip): | |
|
272 | """does `ip` point to this machine?""" | |
|
273 | return ip in LOCAL_IPS | |
|
274 | ||
|
275 | @_requires_ips | |
|
276 | def is_public_ip(ip): | |
|
277 | """is `ip` a publicly visible address?""" | |
|
278 | return ip in PUBLIC_IPS | |
|
5 | from jupyter_client.localinterfaces import * |
@@ -1,448 +1,448 b'' | |||
|
1 | 1 | """Utilities for connecting to jupyter kernels |
|
2 | 2 | |
|
3 | 3 | The :class:`ConnectionFileMixin` class in this module encapsulates the logic |
|
4 | 4 | related to writing and reading connections files. |
|
5 | 5 | """ |
|
6 | 6 | |
|
7 | 7 | # Copyright (c) Jupyter Development Team. |
|
8 | 8 | # Distributed under the terms of the Modified BSD License. |
|
9 | 9 | |
|
10 | 10 | |
|
11 | 11 | from __future__ import absolute_import |
|
12 | 12 | |
|
13 | 13 | import glob |
|
14 | 14 | import json |
|
15 | 15 | import os |
|
16 | 16 | import socket |
|
17 | 17 | from getpass import getpass |
|
18 | 18 | import tempfile |
|
19 | 19 | |
|
20 | 20 | import zmq |
|
21 | 21 | |
|
22 | 22 | from IPython.config import LoggingConfigurable |
|
23 |
from |
|
|
23 | from .localinterfaces import localhost | |
|
24 | 24 | from IPython.utils.path import filefind |
|
25 | 25 | from IPython.utils.py3compat import (str_to_bytes, bytes_to_str, cast_bytes_py2, |
|
26 | 26 | string_types) |
|
27 | 27 | from IPython.utils.traitlets import ( |
|
28 | 28 | Bool, Integer, Unicode, CaselessStrEnum, Instance, |
|
29 | 29 | ) |
|
30 | 30 | |
|
31 | 31 | |
|
def write_connection_file(fname=None, shell_port=0, iopub_port=0, stdin_port=0, hb_port=0,
                          control_port=0, ip='', key=b'', transport='tcp',
                          signature_scheme='hmac-sha256',
                          ):
    """Generates a JSON config file, including the selection of random ports.

    Parameters
    ----------

    fname : unicode
        The path to the file to write

    shell_port : int, optional
        The port to use for ROUTER (shell) channel.

    iopub_port : int, optional
        The port to use for the SUB channel.

    stdin_port : int, optional
        The port to use for the ROUTER (raw input) channel.

    control_port : int, optional
        The port to use for the ROUTER (control) channel.

    hb_port : int, optional
        The port to use for the heartbeat REP channel.

    ip : str, optional
        The ip address the kernel will bind to.

    key : str, optional
        The Session key used for message authentication.

    signature_scheme : str, optional
        The scheme used for message authentication.
        This has the form 'digest-hash', where 'digest'
        is the scheme used for digests, and 'hash' is the name of the hash function
        used by the digest scheme.
        Currently, 'hmac' is the only supported digest scheme,
        and 'sha256' is the default hash function.

    Returns
    -------
    (fname, cfg) : the path written and the connection dict that was serialized.
    """
    if not ip:
        ip = localhost()
    # default to temporary connector file
    if not fname:
        fd, fname = tempfile.mkstemp('.json')
        os.close(fd)

    # Find open ports as necessary.

    ports = []
    # one port must be allocated for each channel whose port was not given (<= 0)
    ports_needed = int(shell_port <= 0) + \
                   int(iopub_port <= 0) + \
                   int(stdin_port <= 0) + \
                   int(control_port <= 0) + \
                   int(hb_port <= 0)
    if transport == 'tcp':
        # bind all sockets first, then read the ports and close them:
        # keeping them open while binding prevents the OS handing back
        # the same ephemeral port twice
        for i in range(ports_needed):
            sock = socket.socket()
            # struct.pack('ii', (0,0)) is 8 null bytes
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, b'\0' * 8)
            sock.bind(('', 0))
            ports.append(sock)
        for i, sock in enumerate(ports):
            port = sock.getsockname()[1]
            sock.close()
            ports[i] = port
        # NOTE(review): the port is released before the kernel binds it, so a
        # race with another process remains possible — inherited upstream behavior
    else:
        # ipc transport: "ports" are numeric suffixes on filesystem paths
        # of the form "<ip>-<N>"; pick the first N values whose path is free
        N = 1
        for i in range(ports_needed):
            while os.path.exists("%s-%s" % (ip, str(N))):
                N += 1
            ports.append(N)
            N += 1
    # hand the allocated ports out in a fixed order
    if shell_port <= 0:
        shell_port = ports.pop(0)
    if iopub_port <= 0:
        iopub_port = ports.pop(0)
    if stdin_port <= 0:
        stdin_port = ports.pop(0)
    if control_port <= 0:
        control_port = ports.pop(0)
    if hb_port <= 0:
        hb_port = ports.pop(0)

    cfg = dict( shell_port=shell_port,
                iopub_port=iopub_port,
                stdin_port=stdin_port,
                control_port=control_port,
                hb_port=hb_port,
              )
    cfg['ip'] = ip
    # key is bytes; JSON needs text
    cfg['key'] = bytes_to_str(key)
    cfg['transport'] = transport
    cfg['signature_scheme'] = signature_scheme

    with open(fname, 'w') as f:
        f.write(json.dumps(cfg, indent=2))

    return fname, cfg
|
133 | 133 | |
|
134 | 134 | |
|
def find_connection_file(filename='kernel-*.json', path=None):
    """Locate a connection file and return its absolute path.

    The file is first looked up by its exact name in ``path`` (default:
    the current directory).  Failing that, ``filename`` is treated as a
    glob (or wrapped in ``*...*`` as a substring pattern) and the match
    with the most recent access time wins.

    Parameters
    ----------
    filename : str
        The connection file or fileglob to search for.
    path : str or list of strs[optional]
        Paths in which to search for connection files.

    Returns
    -------
    str : The absolute path of the connection file.

    Raises
    ------
    IOError : when no candidate matches.
    """
    if path is None:
        path = ['.']
    if isinstance(path, string_types):
        path = [path]

    try:
        # exact name wins outright
        return filefind(filename, path)
    except IOError:
        pass

    # exact lookup failed; fall back to glob matching
    pat = filename if '*' in filename else '*%s*' % filename

    matches = []
    for directory in path:
        matches.extend(glob.glob(os.path.join(directory, pat)))

    if not matches:
        raise IOError("Could not find %r in %r" % (filename, path))
    if len(matches) == 1:
        return matches[0]
    # several candidates: prefer the most recently accessed one
    return sorted(matches, key=lambda f: os.stat(f).st_atime)[-1]
|
191 | 191 | |
|
192 | 192 | |
|
def tunnel_to_kernel(connection_info, sshserver, sshkey=None):
    """tunnel connections to a kernel via ssh

    This will open four SSH tunnels from localhost on this machine to the
    ports associated with the kernel. They can be either direct
    localhost-localhost tunnels, or if an intermediate server is necessary,
    the kernel must be listening on a public IP.

    Parameters
    ----------
    connection_info : dict or str (path)
        Either a connection dict, or the path to a JSON connection file
    sshserver : str
        The ssh sever to use to tunnel to the kernel. Can be a full
        `user@server:port` string. ssh config aliases are respected.
    sshkey : str [optional]
        Path to file containing ssh key to use for authentication.
        Only necessary if your ssh config does not already associate
        a keyfile with the host.

    Returns
    -------

    (shell, iopub, stdin, hb) : ints
        The four ports on localhost that have been forwarded to the kernel.
    """
    # local import: zmq.ssh pulls in paramiko/pexpect machinery only when needed
    from zmq.ssh import tunnel
    if isinstance(connection_info, string_types):
        # it's a path, unpack it
        with open(connection_info) as f:
            connection_info = json.loads(f.read())

    cf = connection_info

    # four local ports, paired positionally with the four remote channel ports
    # (note: the control channel is not tunneled here)
    lports = tunnel.select_random_ports(4)
    rports = cf['shell_port'], cf['iopub_port'], cf['stdin_port'], cf['hb_port']

    remote_ip = cf['ip']

    if tunnel.try_passwordless_ssh(sshserver, sshkey):
        password=False
    else:
        # interactive prompt; blocks until the user types a password
        password = getpass("SSH Password for %s: " % cast_bytes_py2(sshserver))

    for lp,rp in zip(lports, rports):
        tunnel.ssh_tunnel(lp, rp, sshserver, remote_ip, sshkey, password)

    return tuple(lports)
|
241 | 241 | |
|
242 | 242 | |
|
243 | 243 | #----------------------------------------------------------------------------- |
|
244 | 244 | # Mixin for classes that work with connection files |
|
245 | 245 | #----------------------------------------------------------------------------- |
|
246 | 246 | |
|
# zmq socket type used on the *client* side for each kernel channel
channel_socket_types = {
    'hb' : zmq.REQ,
    'shell' : zmq.DEALER,
    'iopub' : zmq.SUB,
    'stdin' : zmq.DEALER,
    'control': zmq.DEALER,
}

# trait names holding each channel's port, e.g. 'shell_port'
port_names = [ "%s_port" % channel for channel in ('shell', 'stdin', 'iopub', 'hb', 'control')]
|
256 | 256 | |
|
257 | 257 | class ConnectionFileMixin(LoggingConfigurable): |
|
258 | 258 | """Mixin for configurable classes that work with connection files""" |
|
259 | 259 | |
|
260 | 260 | # The addresses for the communication channels |
|
261 | 261 | connection_file = Unicode('', config=True, |
|
262 | 262 | help="""JSON file in which to store connection info [default: kernel-<pid>.json] |
|
263 | 263 | |
|
264 | 264 | This file will contain the IP, ports, and authentication key needed to connect |
|
265 | 265 | clients to this kernel. By default, this file will be created in the security dir |
|
266 | 266 | of the current profile, but can be specified by absolute path. |
|
267 | 267 | """) |
|
268 | 268 | _connection_file_written = Bool(False) |
|
269 | 269 | |
|
270 | 270 | transport = CaselessStrEnum(['tcp', 'ipc'], default_value='tcp', config=True) |
|
271 | 271 | |
|
272 | 272 | ip = Unicode(config=True, |
|
273 | 273 | help="""Set the kernel\'s IP address [default localhost]. |
|
274 | 274 | If the IP address is something other than localhost, then |
|
275 | 275 | Consoles on other machines will be able to connect |
|
276 | 276 | to the Kernel, so be careful!""" |
|
277 | 277 | ) |
|
278 | 278 | |
|
279 | 279 | def _ip_default(self): |
|
280 | 280 | if self.transport == 'ipc': |
|
281 | 281 | if self.connection_file: |
|
282 | 282 | return os.path.splitext(self.connection_file)[0] + '-ipc' |
|
283 | 283 | else: |
|
284 | 284 | return 'kernel-ipc' |
|
285 | 285 | else: |
|
286 | 286 | return localhost() |
|
287 | 287 | |
|
288 | 288 | def _ip_changed(self, name, old, new): |
|
289 | 289 | if new == '*': |
|
290 | 290 | self.ip = '0.0.0.0' |
|
291 | 291 | |
|
292 | 292 | # protected traits |
|
293 | 293 | |
|
294 | 294 | hb_port = Integer(0, config=True, |
|
295 | 295 | help="set the heartbeat port [default: random]") |
|
296 | 296 | shell_port = Integer(0, config=True, |
|
297 | 297 | help="set the shell (ROUTER) port [default: random]") |
|
298 | 298 | iopub_port = Integer(0, config=True, |
|
299 | 299 | help="set the iopub (PUB) port [default: random]") |
|
300 | 300 | stdin_port = Integer(0, config=True, |
|
301 | 301 | help="set the stdin (ROUTER) port [default: random]") |
|
302 | 302 | control_port = Integer(0, config=True, |
|
303 | 303 | help="set the control (ROUTER) port [default: random]") |
|
304 | 304 | |
|
305 | 305 | @property |
|
306 | 306 | def ports(self): |
|
307 | 307 | return [ getattr(self, name) for name in port_names ] |
|
308 | 308 | |
|
309 | 309 | # The Session to use for communication with the kernel. |
|
310 | 310 | session = Instance('jupyter_client.session.Session') |
|
311 | 311 | def _session_default(self): |
|
312 | 312 | from jupyter_client.session import Session |
|
313 | 313 | return Session(parent=self) |
|
314 | 314 | |
|
315 | 315 | #-------------------------------------------------------------------------- |
|
316 | 316 | # Connection and ipc file management |
|
317 | 317 | #-------------------------------------------------------------------------- |
|
318 | 318 | |
|
319 | 319 | def get_connection_info(self): |
|
320 | 320 | """return the connection info as a dict""" |
|
321 | 321 | return dict( |
|
322 | 322 | transport=self.transport, |
|
323 | 323 | ip=self.ip, |
|
324 | 324 | shell_port=self.shell_port, |
|
325 | 325 | iopub_port=self.iopub_port, |
|
326 | 326 | stdin_port=self.stdin_port, |
|
327 | 327 | hb_port=self.hb_port, |
|
328 | 328 | control_port=self.control_port, |
|
329 | 329 | signature_scheme=self.session.signature_scheme, |
|
330 | 330 | key=self.session.key, |
|
331 | 331 | ) |
|
332 | 332 | |
|
333 | 333 | def cleanup_connection_file(self): |
|
334 | 334 | """Cleanup connection file *if we wrote it* |
|
335 | 335 | |
|
336 | 336 | Will not raise if the connection file was already removed somehow. |
|
337 | 337 | """ |
|
338 | 338 | if self._connection_file_written: |
|
339 | 339 | # cleanup connection files on full shutdown of kernel we started |
|
340 | 340 | self._connection_file_written = False |
|
341 | 341 | try: |
|
342 | 342 | os.remove(self.connection_file) |
|
343 | 343 | except (IOError, OSError, AttributeError): |
|
344 | 344 | pass |
|
345 | 345 | |
|
346 | 346 | def cleanup_ipc_files(self): |
|
347 | 347 | """Cleanup ipc files if we wrote them.""" |
|
348 | 348 | if self.transport != 'ipc': |
|
349 | 349 | return |
|
350 | 350 | for port in self.ports: |
|
351 | 351 | ipcfile = "%s-%i" % (self.ip, port) |
|
352 | 352 | try: |
|
353 | 353 | os.remove(ipcfile) |
|
354 | 354 | except (IOError, OSError): |
|
355 | 355 | pass |
|
356 | 356 | |
|
357 | 357 | def write_connection_file(self): |
|
358 | 358 | """Write connection info to JSON dict in self.connection_file.""" |
|
359 | 359 | if self._connection_file_written and os.path.exists(self.connection_file): |
|
360 | 360 | return |
|
361 | 361 | |
|
362 | 362 | self.connection_file, cfg = write_connection_file(self.connection_file, |
|
363 | 363 | transport=self.transport, ip=self.ip, key=self.session.key, |
|
364 | 364 | stdin_port=self.stdin_port, iopub_port=self.iopub_port, |
|
365 | 365 | shell_port=self.shell_port, hb_port=self.hb_port, |
|
366 | 366 | control_port=self.control_port, |
|
367 | 367 | signature_scheme=self.session.signature_scheme, |
|
368 | 368 | ) |
|
369 | 369 | # write_connection_file also sets default ports: |
|
370 | 370 | for name in port_names: |
|
371 | 371 | setattr(self, name, cfg[name]) |
|
372 | 372 | |
|
373 | 373 | self._connection_file_written = True |
|
374 | 374 | |
|
375 | 375 | def load_connection_file(self): |
|
376 | 376 | """Load connection info from JSON dict in self.connection_file.""" |
|
377 | 377 | self.log.debug(u"Loading connection file %s", self.connection_file) |
|
378 | 378 | with open(self.connection_file) as f: |
|
379 | 379 | cfg = json.load(f) |
|
380 | 380 | self.transport = cfg.get('transport', self.transport) |
|
381 | 381 | self.ip = cfg.get('ip', self._ip_default()) |
|
382 | 382 | |
|
383 | 383 | for name in port_names: |
|
384 | 384 | if getattr(self, name) == 0 and name in cfg: |
|
385 | 385 | # not overridden by config or cl_args |
|
386 | 386 | setattr(self, name, cfg[name]) |
|
387 | 387 | |
|
388 | 388 | if 'key' in cfg: |
|
389 | 389 | self.session.key = str_to_bytes(cfg['key']) |
|
390 | 390 | if 'signature_scheme' in cfg: |
|
391 | 391 | self.session.signature_scheme = cfg['signature_scheme'] |
|
392 | 392 | |
|
393 | 393 | #-------------------------------------------------------------------------- |
|
394 | 394 | # Creating connected sockets |
|
395 | 395 | #-------------------------------------------------------------------------- |
|
396 | 396 | |
|
397 | 397 | def _make_url(self, channel): |
|
398 | 398 | """Make a ZeroMQ URL for a given channel.""" |
|
399 | 399 | transport = self.transport |
|
400 | 400 | ip = self.ip |
|
401 | 401 | port = getattr(self, '%s_port' % channel) |
|
402 | 402 | |
|
403 | 403 | if transport == 'tcp': |
|
404 | 404 | return "tcp://%s:%i" % (ip, port) |
|
405 | 405 | else: |
|
406 | 406 | return "%s://%s-%s" % (transport, ip, port) |
|
407 | 407 | |
|
408 | 408 | def _create_connected_socket(self, channel, identity=None): |
|
409 | 409 | """Create a zmq Socket and connect it to the kernel.""" |
|
410 | 410 | url = self._make_url(channel) |
|
411 | 411 | socket_type = channel_socket_types[channel] |
|
412 | 412 | self.log.debug("Connecting to: %s" % url) |
|
413 | 413 | sock = self.context.socket(socket_type) |
|
414 | 414 | # set linger to 1s to prevent hangs at exit |
|
415 | 415 | sock.linger = 1000 |
|
416 | 416 | if identity: |
|
417 | 417 | sock.identity = identity |
|
418 | 418 | sock.connect(url) |
|
419 | 419 | return sock |
|
420 | 420 | |
|
421 | 421 | def connect_iopub(self, identity=None): |
|
422 | 422 | """return zmq Socket connected to the IOPub channel""" |
|
423 | 423 | sock = self._create_connected_socket('iopub', identity=identity) |
|
424 | 424 | sock.setsockopt(zmq.SUBSCRIBE, b'') |
|
425 | 425 | return sock |
|
426 | 426 | |
|
427 | 427 | def connect_shell(self, identity=None): |
|
428 | 428 | """return zmq Socket connected to the Shell channel""" |
|
429 | 429 | return self._create_connected_socket('shell', identity=identity) |
|
430 | 430 | |
|
431 | 431 | def connect_stdin(self, identity=None): |
|
432 | 432 | """return zmq Socket connected to the StdIn channel""" |
|
433 | 433 | return self._create_connected_socket('stdin', identity=identity) |
|
434 | 434 | |
|
435 | 435 | def connect_hb(self, identity=None): |
|
436 | 436 | """return zmq Socket connected to the Heartbeat channel""" |
|
437 | 437 | return self._create_connected_socket('hb', identity=identity) |
|
438 | 438 | |
|
439 | 439 | def connect_control(self, identity=None): |
|
440 | 440 | """return zmq Socket connected to the Control channel""" |
|
441 | 441 | return self._create_connected_socket('control', identity=identity) |
|
442 | 442 | |
|
443 | 443 | |
|
444 | 444 | __all__ = [ |
|
445 | 445 | 'write_connection_file', |
|
446 | 446 | 'find_connection_file', |
|
447 | 447 | 'tunnel_to_kernel', |
|
448 | 448 | ] |
@@ -1,331 +1,331 b'' | |||
|
1 | 1 | """ A minimal application base mixin for all ZMQ based IPython frontends. |
|
2 | 2 | |
|
3 | 3 | This is not a complete console app, as subprocess will not be able to receive |
|
4 | 4 | input, there is no real readline support, among other limitations. This is a |
|
5 | 5 | refactoring of what used to be the IPython/qt/console/qtconsoleapp.py |
|
6 | 6 | """ |
|
7 | 7 | # Copyright (c) IPython Development Team. |
|
8 | 8 | # Distributed under the terms of the Modified BSD License. |
|
9 | 9 | |
|
10 | 10 | import atexit |
|
11 | 11 | import os |
|
12 | 12 | import signal |
|
13 | 13 | import sys |
|
14 | 14 | import uuid |
|
15 | 15 | |
|
16 | 16 | |
|
17 | 17 | from IPython.config.application import boolean_flag |
|
18 | 18 | from IPython.core.profiledir import ProfileDir |
|
19 | 19 | from IPython.utils.path import filefind |
|
20 | 20 | from IPython.utils.traitlets import ( |
|
21 | 21 | Dict, List, Unicode, CUnicode, CBool, Any |
|
22 | 22 | ) |
|
23 | 23 | |
|
24 | 24 | from .blocking import BlockingKernelClient |
|
25 | 25 | from . import KernelManager, tunnel_to_kernel, find_connection_file, connect |
|
26 | 26 | from .kernelspec import NoSuchKernel |
|
27 | 27 | from .session import Session |
|
28 | 28 | |
|
29 | 29 | ConnectionFileMixin = connect.ConnectionFileMixin |
|
30 | 30 | |
|
31 |
from |
|
|
31 | from .localinterfaces import localhost | |
|
32 | 32 | |
|
33 | 33 | #----------------------------------------------------------------------------- |
|
34 | 34 | # Aliases and Flags |
|
35 | 35 | #----------------------------------------------------------------------------- |
|
36 | 36 | |
|
37 | 37 | flags = {} |
|
38 | 38 | |
|
39 | 39 | # the flags that are specific to the frontend |
|
40 | 40 | # these must be scrubbed before being passed to the kernel, |
|
41 | 41 | # or it will raise an error on unrecognized flags |
|
42 | 42 | app_flags = { |
|
43 | 43 | 'existing' : ({'IPythonConsoleApp' : {'existing' : 'kernel*.json'}}, |
|
44 | 44 | "Connect to an existing kernel. If no argument specified, guess most recent"), |
|
45 | 45 | } |
|
46 | 46 | app_flags.update(boolean_flag( |
|
47 | 47 | 'confirm-exit', 'IPythonConsoleApp.confirm_exit', |
|
48 | 48 | """Set to display confirmation dialog on exit. You can always use 'exit' or 'quit', |
|
49 | 49 | to force a direct exit without any confirmation. |
|
50 | 50 | """, |
|
51 | 51 | """Don't prompt the user when exiting. This will terminate the kernel |
|
52 | 52 | if it is owned by the frontend, and leave it alive if it is external. |
|
53 | 53 | """ |
|
54 | 54 | )) |
|
55 | 55 | flags.update(app_flags) |
|
56 | 56 | |
|
57 | 57 | aliases = {} |
|
58 | 58 | |
|
59 | 59 | # also scrub aliases from the frontend |
|
60 | 60 | app_aliases = dict( |
|
61 | 61 | ip = 'IPythonConsoleApp.ip', |
|
62 | 62 | transport = 'IPythonConsoleApp.transport', |
|
63 | 63 | hb = 'IPythonConsoleApp.hb_port', |
|
64 | 64 | shell = 'IPythonConsoleApp.shell_port', |
|
65 | 65 | iopub = 'IPythonConsoleApp.iopub_port', |
|
66 | 66 | stdin = 'IPythonConsoleApp.stdin_port', |
|
67 | 67 | existing = 'IPythonConsoleApp.existing', |
|
68 | 68 | f = 'IPythonConsoleApp.connection_file', |
|
69 | 69 | |
|
70 | 70 | kernel = 'IPythonConsoleApp.kernel_name', |
|
71 | 71 | |
|
72 | 72 | ssh = 'IPythonConsoleApp.sshserver', |
|
73 | 73 | ) |
|
74 | 74 | aliases.update(app_aliases) |
|
75 | 75 | |
|
76 | 76 | #----------------------------------------------------------------------------- |
|
77 | 77 | # Classes |
|
78 | 78 | #----------------------------------------------------------------------------- |
|
79 | 79 | |
|
80 | 80 | classes = [KernelManager, ProfileDir, Session] |
|
81 | 81 | |
|
82 | 82 | class IPythonConsoleApp(ConnectionFileMixin): |
|
83 | 83 | name = 'ipython-console-mixin' |
|
84 | 84 | |
|
85 | 85 | description = """ |
|
86 | 86 | The IPython Mixin Console. |
|
87 | 87 | |
|
88 | 88 | This class contains the common portions of console client (QtConsole, |
|
89 | 89 | ZMQ-based terminal console, etc). It is not a full console, in that |
|
90 | 90 | launched terminal subprocesses will not be able to accept input. |
|
91 | 91 | |
|
92 | 92 | The Console using this mixing supports various extra features beyond |
|
93 | 93 | the single-process Terminal IPython shell, such as connecting to |
|
94 | 94 | existing kernel, via: |
|
95 | 95 | |
|
96 | 96 | ipython <appname> --existing |
|
97 | 97 | |
|
98 | 98 | as well as tunnel via SSH |
|
99 | 99 | |
|
100 | 100 | """ |
|
101 | 101 | |
|
102 | 102 | classes = classes |
|
103 | 103 | flags = Dict(flags) |
|
104 | 104 | aliases = Dict(aliases) |
|
105 | 105 | kernel_manager_class = KernelManager |
|
106 | 106 | kernel_client_class = BlockingKernelClient |
|
107 | 107 | |
|
108 | 108 | kernel_argv = List(Unicode) |
|
109 | 109 | # frontend flags&aliases to be stripped when building kernel_argv |
|
110 | 110 | frontend_flags = Any(app_flags) |
|
111 | 111 | frontend_aliases = Any(app_aliases) |
|
112 | 112 | |
|
113 | 113 | # create requested profiles by default, if they don't exist: |
|
114 | 114 | auto_create = CBool(True) |
|
115 | 115 | # connection info: |
|
116 | 116 | |
|
117 | 117 | sshserver = Unicode('', config=True, |
|
118 | 118 | help="""The SSH server to use to connect to the kernel.""") |
|
119 | 119 | sshkey = Unicode('', config=True, |
|
120 | 120 | help="""Path to the ssh key to use for logging in to the ssh server.""") |
|
121 | 121 | |
|
122 | 122 | def _connection_file_default(self): |
|
123 | 123 | return 'kernel-%i.json' % os.getpid() |
|
124 | 124 | |
|
125 | 125 | existing = CUnicode('', config=True, |
|
126 | 126 | help="""Connect to an already running kernel""") |
|
127 | 127 | |
|
128 | 128 | kernel_name = Unicode('python', config=True, |
|
129 | 129 | help="""The name of the default kernel to start.""") |
|
130 | 130 | |
|
131 | 131 | confirm_exit = CBool(True, config=True, |
|
132 | 132 | help=""" |
|
133 | 133 | Set to display confirmation dialog on exit. You can always use 'exit' or 'quit', |
|
134 | 134 | to force a direct exit without any confirmation.""", |
|
135 | 135 | ) |
|
136 | 136 | |
|
137 | 137 | def build_kernel_argv(self, argv=None): |
|
138 | 138 | """build argv to be passed to kernel subprocess |
|
139 | 139 | |
|
140 | 140 | Override in subclasses if any args should be passed to the kernel |
|
141 | 141 | """ |
|
142 | 142 | self.kernel_argv = self.extra_args |
|
143 | 143 | |
|
144 | 144 | def init_connection_file(self): |
|
145 | 145 | """find the connection file, and load the info if found. |
|
146 | 146 | |
|
147 | 147 | The current working directory and the current profile's security |
|
148 | 148 | directory will be searched for the file if it is not given by |
|
149 | 149 | absolute path. |
|
150 | 150 | |
|
151 | 151 | When attempting to connect to an existing kernel and the `--existing` |
|
152 | 152 | argument does not match an existing file, it will be interpreted as a |
|
153 | 153 | fileglob, and the matching file in the current profile's security dir |
|
154 | 154 | with the latest access time will be used. |
|
155 | 155 | |
|
156 | 156 | After this method is called, self.connection_file contains the *full path* |
|
157 | 157 | to the connection file, never just its name. |
|
158 | 158 | """ |
|
159 | 159 | if self.existing: |
|
160 | 160 | try: |
|
161 | 161 | cf = find_connection_file(self.existing) |
|
162 | 162 | except Exception: |
|
163 | 163 | self.log.critical("Could not find existing kernel connection file %s", self.existing) |
|
164 | 164 | self.exit(1) |
|
165 | 165 | self.log.debug("Connecting to existing kernel: %s" % cf) |
|
166 | 166 | self.connection_file = cf |
|
167 | 167 | else: |
|
168 | 168 | # not existing, check if we are going to write the file |
|
169 | 169 | # and ensure that self.connection_file is a full path, not just the shortname |
|
170 | 170 | try: |
|
171 | 171 | cf = find_connection_file(self.connection_file) |
|
172 | 172 | except Exception: |
|
173 | 173 | # file might not exist |
|
174 | 174 | if self.connection_file == os.path.basename(self.connection_file): |
|
175 | 175 | # just shortname, put it in security dir |
|
176 | 176 | cf = os.path.join(self.profile_dir.security_dir, self.connection_file) |
|
177 | 177 | else: |
|
178 | 178 | cf = self.connection_file |
|
179 | 179 | self.connection_file = cf |
|
180 | 180 | try: |
|
181 | 181 | self.connection_file = filefind(self.connection_file, ['.', self.profile_dir.security_dir]) |
|
182 | 182 | except IOError: |
|
183 | 183 | self.log.debug("Connection File not found: %s", self.connection_file) |
|
184 | 184 | return |
|
185 | 185 | |
|
186 | 186 | # should load_connection_file only be used for existing? |
|
187 | 187 | # as it is now, this allows reusing ports if an existing |
|
188 | 188 | # file is requested |
|
189 | 189 | try: |
|
190 | 190 | self.load_connection_file() |
|
191 | 191 | except Exception: |
|
192 | 192 | self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True) |
|
193 | 193 | self.exit(1) |
|
194 | 194 | |
|
195 | 195 | def init_ssh(self): |
|
196 | 196 | """set up ssh tunnels, if needed.""" |
|
197 | 197 | if not self.existing or (not self.sshserver and not self.sshkey): |
|
198 | 198 | return |
|
199 | 199 | self.load_connection_file() |
|
200 | 200 | |
|
201 | 201 | transport = self.transport |
|
202 | 202 | ip = self.ip |
|
203 | 203 | |
|
204 | 204 | if transport != 'tcp': |
|
205 | 205 | self.log.error("Can only use ssh tunnels with TCP sockets, not %s", transport) |
|
206 | 206 | sys.exit(-1) |
|
207 | 207 | |
|
208 | 208 | if self.sshkey and not self.sshserver: |
|
209 | 209 | # specifying just the key implies that we are connecting directly |
|
210 | 210 | self.sshserver = ip |
|
211 | 211 | ip = localhost() |
|
212 | 212 | |
|
213 | 213 | # build connection dict for tunnels: |
|
214 | 214 | info = dict(ip=ip, |
|
215 | 215 | shell_port=self.shell_port, |
|
216 | 216 | iopub_port=self.iopub_port, |
|
217 | 217 | stdin_port=self.stdin_port, |
|
218 | 218 | hb_port=self.hb_port |
|
219 | 219 | ) |
|
220 | 220 | |
|
221 | 221 | self.log.info("Forwarding connections to %s via %s"%(ip, self.sshserver)) |
|
222 | 222 | |
|
223 | 223 | # tunnels return a new set of ports, which will be on localhost: |
|
224 | 224 | self.ip = localhost() |
|
225 | 225 | try: |
|
226 | 226 | newports = tunnel_to_kernel(info, self.sshserver, self.sshkey) |
|
227 | 227 | except: |
|
228 | 228 | # even catch KeyboardInterrupt |
|
229 | 229 | self.log.error("Could not setup tunnels", exc_info=True) |
|
230 | 230 | self.exit(1) |
|
231 | 231 | |
|
232 | 232 | self.shell_port, self.iopub_port, self.stdin_port, self.hb_port = newports |
|
233 | 233 | |
|
234 | 234 | cf = self.connection_file |
|
235 | 235 | base,ext = os.path.splitext(cf) |
|
236 | 236 | base = os.path.basename(base) |
|
237 | 237 | self.connection_file = os.path.basename(base)+'-ssh'+ext |
|
238 | 238 | self.log.info("To connect another client via this tunnel, use:") |
|
239 | 239 | self.log.info("--existing %s" % self.connection_file) |
|
240 | 240 | |
|
241 | 241 | def _new_connection_file(self): |
|
242 | 242 | cf = '' |
|
243 | 243 | while not cf: |
|
244 | 244 | # we don't need a 128b id to distinguish kernels, use more readable |
|
245 | 245 | # 48b node segment (12 hex chars). Users running more than 32k simultaneous |
|
246 | 246 | # kernels can subclass. |
|
247 | 247 | ident = str(uuid.uuid4()).split('-')[-1] |
|
248 | 248 | cf = os.path.join(self.profile_dir.security_dir, 'kernel-%s.json' % ident) |
|
249 | 249 | # only keep if it's actually new. Protect against unlikely collision |
|
250 | 250 | # in 48b random search space |
|
251 | 251 | cf = cf if not os.path.exists(cf) else '' |
|
252 | 252 | return cf |
|
253 | 253 | |
|
254 | 254 | def init_kernel_manager(self): |
|
255 | 255 | # Don't let Qt or ZMQ swallow KeyboardInterupts. |
|
256 | 256 | if self.existing: |
|
257 | 257 | self.kernel_manager = None |
|
258 | 258 | return |
|
259 | 259 | signal.signal(signal.SIGINT, signal.SIG_DFL) |
|
260 | 260 | |
|
261 | 261 | # Create a KernelManager and start a kernel. |
|
262 | 262 | try: |
|
263 | 263 | self.kernel_manager = self.kernel_manager_class( |
|
264 | 264 | ip=self.ip, |
|
265 | 265 | session=self.session, |
|
266 | 266 | transport=self.transport, |
|
267 | 267 | shell_port=self.shell_port, |
|
268 | 268 | iopub_port=self.iopub_port, |
|
269 | 269 | stdin_port=self.stdin_port, |
|
270 | 270 | hb_port=self.hb_port, |
|
271 | 271 | connection_file=self.connection_file, |
|
272 | 272 | kernel_name=self.kernel_name, |
|
273 | 273 | parent=self, |
|
274 | 274 | ipython_dir=self.ipython_dir, |
|
275 | 275 | ) |
|
276 | 276 | except NoSuchKernel: |
|
277 | 277 | self.log.critical("Could not find kernel %s", self.kernel_name) |
|
278 | 278 | self.exit(1) |
|
279 | 279 | |
|
280 | 280 | self.kernel_manager.client_factory = self.kernel_client_class |
|
281 | 281 | # FIXME: remove special treatment of IPython kernels |
|
282 | 282 | kwargs = {} |
|
283 | 283 | if self.kernel_manager.ipython_kernel: |
|
284 | 284 | kwargs['extra_arguments'] = self.kernel_argv |
|
285 | 285 | self.kernel_manager.start_kernel(**kwargs) |
|
286 | 286 | atexit.register(self.kernel_manager.cleanup_ipc_files) |
|
287 | 287 | |
|
288 | 288 | if self.sshserver: |
|
289 | 289 | # ssh, write new connection file |
|
290 | 290 | self.kernel_manager.write_connection_file() |
|
291 | 291 | |
|
292 | 292 | # in case KM defaults / ssh writing changes things: |
|
293 | 293 | km = self.kernel_manager |
|
294 | 294 | self.shell_port=km.shell_port |
|
295 | 295 | self.iopub_port=km.iopub_port |
|
296 | 296 | self.stdin_port=km.stdin_port |
|
297 | 297 | self.hb_port=km.hb_port |
|
298 | 298 | self.connection_file = km.connection_file |
|
299 | 299 | |
|
300 | 300 | atexit.register(self.kernel_manager.cleanup_connection_file) |
|
301 | 301 | |
|
302 | 302 | def init_kernel_client(self): |
|
303 | 303 | if self.kernel_manager is not None: |
|
304 | 304 | self.kernel_client = self.kernel_manager.client() |
|
305 | 305 | else: |
|
306 | 306 | self.kernel_client = self.kernel_client_class( |
|
307 | 307 | session=self.session, |
|
308 | 308 | ip=self.ip, |
|
309 | 309 | transport=self.transport, |
|
310 | 310 | shell_port=self.shell_port, |
|
311 | 311 | iopub_port=self.iopub_port, |
|
312 | 312 | stdin_port=self.stdin_port, |
|
313 | 313 | hb_port=self.hb_port, |
|
314 | 314 | connection_file=self.connection_file, |
|
315 | 315 | parent=self, |
|
316 | 316 | ) |
|
317 | 317 | |
|
318 | 318 | self.kernel_client.start_channels() |
|
319 | 319 | |
|
320 | 320 | |
|
321 | 321 | |
|
322 | 322 | def initialize(self, argv=None): |
|
323 | 323 | """ |
|
324 | 324 | Classes which mix this class in should call: |
|
325 | 325 | IPythonConsoleApp.initialize(self,argv) |
|
326 | 326 | """ |
|
327 | 327 | self.init_connection_file() |
|
328 | 328 | self.init_ssh() |
|
329 | 329 | self.init_kernel_manager() |
|
330 | 330 | self.init_kernel_client() |
|
331 | 331 |
@@ -1,442 +1,442 b'' | |||
|
1 | 1 | """Base class to manage a running kernel""" |
|
2 | 2 | |
|
3 | 3 | # Copyright (c) IPython Development Team. |
|
4 | 4 | # Distributed under the terms of the Modified BSD License. |
|
5 | 5 | |
|
6 | 6 | from __future__ import absolute_import |
|
7 | 7 | |
|
8 | 8 | from contextlib import contextmanager |
|
9 | 9 | import os |
|
10 | 10 | import re |
|
11 | 11 | import signal |
|
12 | 12 | import sys |
|
13 | 13 | import time |
|
14 | 14 | import warnings |
|
15 | 15 | try: |
|
16 | 16 | from queue import Empty # Py 3 |
|
17 | 17 | except ImportError: |
|
18 | 18 | from Queue import Empty # Py 2 |
|
19 | 19 | |
|
20 | 20 | import zmq |
|
21 | 21 | |
|
22 | 22 | from IPython.utils.importstring import import_item |
|
23 |
from |
|
|
23 | from .localinterfaces import is_local_ip, local_ips | |
|
24 | 24 | from IPython.utils.path import get_ipython_dir |
|
25 | 25 | from IPython.utils.traitlets import ( |
|
26 | 26 | Any, Instance, Unicode, List, Bool, Type, DottedObjectName |
|
27 | 27 | ) |
|
28 | 28 | from jupyter_client import ( |
|
29 | 29 | launch_kernel, |
|
30 | 30 | kernelspec, |
|
31 | 31 | ) |
|
32 | 32 | from .connect import ConnectionFileMixin |
|
33 | 33 | from .session import Session |
|
34 | 34 | from .managerabc import ( |
|
35 | 35 | KernelManagerABC |
|
36 | 36 | ) |
|
37 | 37 | |
|
38 | 38 | |
|
39 | 39 | class KernelManager(ConnectionFileMixin): |
|
40 | 40 | """Manages a single kernel in a subprocess on this host. |
|
41 | 41 | |
|
42 | 42 | This version starts kernels with Popen. |
|
43 | 43 | """ |
|
44 | 44 | |
|
45 | 45 | # The PyZMQ Context to use for communication with the kernel. |
|
46 | 46 | context = Instance(zmq.Context) |
|
47 | 47 | def _context_default(self): |
|
48 | 48 | return zmq.Context.instance() |
|
49 | 49 | |
|
50 | 50 | # the class to create with our `client` method |
|
51 | 51 | client_class = DottedObjectName('jupyter_client.blocking.BlockingKernelClient') |
|
52 | 52 | client_factory = Type(allow_none=True) |
|
53 | 53 | def _client_class_changed(self, name, old, new): |
|
54 | 54 | self.client_factory = import_item(str(new)) |
|
55 | 55 | |
|
56 | 56 | # The kernel process with which the KernelManager is communicating. |
|
57 | 57 | # generally a Popen instance |
|
58 | 58 | kernel = Any() |
|
59 | 59 | |
|
60 | 60 | kernel_spec_manager = Instance(kernelspec.KernelSpecManager) |
|
61 | 61 | |
|
62 | 62 | def _kernel_spec_manager_default(self): |
|
63 | 63 | return kernelspec.KernelSpecManager(ipython_dir=self.ipython_dir) |
|
64 | 64 | |
|
65 | 65 | kernel_name = Unicode(kernelspec.NATIVE_KERNEL_NAME) |
|
66 | 66 | |
|
67 | 67 | kernel_spec = Instance(kernelspec.KernelSpec) |
|
68 | 68 | |
|
69 | 69 | def _kernel_spec_default(self): |
|
70 | 70 | return self.kernel_spec_manager.get_kernel_spec(self.kernel_name) |
|
71 | 71 | |
|
72 | 72 | def _kernel_name_changed(self, name, old, new): |
|
73 | 73 | if new == 'python': |
|
74 | 74 | self.kernel_name = kernelspec.NATIVE_KERNEL_NAME |
|
75 | 75 | # This triggered another run of this function, so we can exit now |
|
76 | 76 | return |
|
77 | 77 | self.kernel_spec = self.kernel_spec_manager.get_kernel_spec(new) |
|
78 | 78 | self.ipython_kernel = new in {'python', 'python2', 'python3'} |
|
79 | 79 | |
|
80 | 80 | kernel_cmd = List(Unicode, config=True, |
|
81 | 81 | help="""DEPRECATED: Use kernel_name instead. |
|
82 | 82 | |
|
83 | 83 | The Popen Command to launch the kernel. |
|
84 | 84 | Override this if you have a custom kernel. |
|
85 | 85 | If kernel_cmd is specified in a configuration file, |
|
86 | 86 | IPython does not pass any arguments to the kernel, |
|
87 | 87 | because it cannot make any assumptions about the |
|
88 | 88 | arguments that the kernel understands. In particular, |
|
89 | 89 | this means that the kernel does not receive the |
|
90 | 90 | option --debug if it given on the IPython command line. |
|
91 | 91 | """ |
|
92 | 92 | ) |
|
93 | 93 | |
|
94 | 94 | def _kernel_cmd_changed(self, name, old, new): |
|
95 | 95 | warnings.warn("Setting kernel_cmd is deprecated, use kernel_spec to " |
|
96 | 96 | "start different kernels.") |
|
97 | 97 | self.ipython_kernel = False |
|
98 | 98 | |
|
99 | 99 | ipython_kernel = Bool(True) |
|
100 | 100 | |
|
101 | 101 | ipython_dir = Unicode() |
|
102 | 102 | def _ipython_dir_default(self): |
|
103 | 103 | return get_ipython_dir() |
|
104 | 104 | |
|
105 | 105 | # Protected traits |
|
106 | 106 | _launch_args = Any() |
|
107 | 107 | _control_socket = Any() |
|
108 | 108 | |
|
109 | 109 | _restarter = Any() |
|
110 | 110 | |
|
111 | 111 | autorestart = Bool(False, config=True, |
|
112 | 112 | help="""Should we autorestart the kernel if it dies.""" |
|
113 | 113 | ) |
|
114 | 114 | |
|
115 | 115 | def __del__(self): |
|
116 | 116 | self._close_control_socket() |
|
117 | 117 | self.cleanup_connection_file() |
|
118 | 118 | |
|
119 | 119 | #-------------------------------------------------------------------------- |
|
120 | 120 | # Kernel restarter |
|
121 | 121 | #-------------------------------------------------------------------------- |
|
122 | 122 | |
|
123 | 123 | def start_restarter(self): |
|
124 | 124 | pass |
|
125 | 125 | |
|
126 | 126 | def stop_restarter(self): |
|
127 | 127 | pass |
|
128 | 128 | |
|
129 | 129 | def add_restart_callback(self, callback, event='restart'): |
|
130 | 130 | """register a callback to be called when a kernel is restarted""" |
|
131 | 131 | if self._restarter is None: |
|
132 | 132 | return |
|
133 | 133 | self._restarter.add_callback(callback, event) |
|
134 | 134 | |
|
135 | 135 | def remove_restart_callback(self, callback, event='restart'): |
|
136 | 136 | """unregister a callback to be called when a kernel is restarted""" |
|
137 | 137 | if self._restarter is None: |
|
138 | 138 | return |
|
139 | 139 | self._restarter.remove_callback(callback, event) |
|
140 | 140 | |
|
141 | 141 | #-------------------------------------------------------------------------- |
|
142 | 142 | # create a Client connected to our Kernel |
|
143 | 143 | #-------------------------------------------------------------------------- |
|
144 | 144 | |
|
145 | 145 | def client(self, **kwargs): |
|
146 | 146 | """Create a client configured to connect to our kernel""" |
|
147 | 147 | if self.client_factory is None: |
|
148 | 148 | self.client_factory = import_item(self.client_class) |
|
149 | 149 | |
|
150 | 150 | kw = {} |
|
151 | 151 | kw.update(self.get_connection_info()) |
|
152 | 152 | kw.update(dict( |
|
153 | 153 | connection_file=self.connection_file, |
|
154 | 154 | session=self.session, |
|
155 | 155 | parent=self, |
|
156 | 156 | )) |
|
157 | 157 | |
|
158 | 158 | # add kwargs last, for manual overrides |
|
159 | 159 | kw.update(kwargs) |
|
160 | 160 | return self.client_factory(**kw) |
|
161 | 161 | |
|
162 | 162 | #-------------------------------------------------------------------------- |
|
163 | 163 | # Kernel management |
|
164 | 164 | #-------------------------------------------------------------------------- |
|
165 | 165 | |
|
166 | 166 | def format_kernel_cmd(self, extra_arguments=None): |
|
167 | 167 | """replace templated args (e.g. {connection_file})""" |
|
168 | 168 | extra_arguments = extra_arguments or [] |
|
169 | 169 | if self.kernel_cmd: |
|
170 | 170 | cmd = self.kernel_cmd + extra_arguments |
|
171 | 171 | else: |
|
172 | 172 | cmd = self.kernel_spec.argv + extra_arguments |
|
173 | 173 | |
|
174 | 174 | ns = dict(connection_file=self.connection_file) |
|
175 | 175 | ns.update(self._launch_args) |
|
176 | 176 | |
|
177 | 177 | pat = re.compile(r'\{([A-Za-z0-9_]+)\}') |
|
178 | 178 | def from_ns(match): |
|
179 | 179 | """Get the key out of ns if it's there, otherwise no change.""" |
|
180 | 180 | return ns.get(match.group(1), match.group()) |
|
181 | 181 | |
|
182 | 182 | return [ pat.sub(from_ns, arg) for arg in cmd ] |
|
183 | 183 | |
|
184 | 184 | def _launch_kernel(self, kernel_cmd, **kw): |
|
185 | 185 | """actually launch the kernel |
|
186 | 186 | |
|
187 | 187 | override in a subclass to launch kernel subprocesses differently |
|
188 | 188 | """ |
|
189 | 189 | return launch_kernel(kernel_cmd, **kw) |
|
190 | 190 | |
|
191 | 191 | # Control socket used for polite kernel shutdown |
|
192 | 192 | |
|
193 | 193 | def _connect_control_socket(self): |
|
194 | 194 | if self._control_socket is None: |
|
195 | 195 | self._control_socket = self.connect_control() |
|
196 | 196 | self._control_socket.linger = 100 |
|
197 | 197 | |
|
198 | 198 | def _close_control_socket(self): |
|
199 | 199 | if self._control_socket is None: |
|
200 | 200 | return |
|
201 | 201 | self._control_socket.close() |
|
202 | 202 | self._control_socket = None |
|
203 | 203 | |
|
def start_kernel(self, **kw):
    """Starts a kernel on this host in a separate process.

    If random ports (port=0) are being used, this method must be called
    before the channels are created.

    Parameters
    ----------
    **kw : optional
        keyword arguments that are passed down to build the kernel_cmd
        and launching the kernel (e.g. Popen kwargs).

    Raises
    ------
    RuntimeError
        If the transport is 'tcp' and ``self.ip`` is not a local interface.
    """
    if self.transport == 'tcp' and not is_local_ip(self.ip):
        raise RuntimeError("Can only launch a kernel on a local interface. "
                           "Make sure that the '*_address' attributes are "
                           "configured properly. "
                           "Currently valid addresses are: %s" % local_ips()
                           )

    # write connection file / get default ports
    self.write_connection_file()

    # save kwargs for use in restart
    # NOTE: copied *before* 'extra_arguments' is popped below, so a later
    # restart_kernel() re-applies the exact same launch options.
    self._launch_args = kw.copy()
    # build the Popen cmd
    extra_arguments = kw.pop('extra_arguments', [])
    kernel_cmd = self.format_kernel_cmd(extra_arguments=extra_arguments)
    if self.kernel_cmd:
        # If kernel_cmd has been set manually, don't refer to a kernel spec
        env = os.environ
    else:
        # Environment variables from kernel spec are added to os.environ
        # (copied first so the process environment is not mutated)
        env = os.environ.copy()
        env.update(self.kernel_spec.env or {})
    # launch the kernel subprocess
    self.kernel = self._launch_kernel(kernel_cmd, env=env,
                                      **kw)
    self.start_restarter()
    self._connect_control_socket()
|
243 | 243 | |
|
def request_shutdown(self, restart=False):
    """Send a shutdown request via the control channel.

    NOTE(review): an earlier docstring claimed that on Windows this
    kills kernels instead; this method always just sends the message —
    confirm whether any subclass still special-cases Windows.
    """
    msg = self.session.msg("shutdown_request",
                           content=dict(restart=restart))
    self.session.send(self._control_socket, msg)
|
253 | 253 | |
|
def finish_shutdown(self, waittime=1, pollinterval=0.1):
    """Wait for kernel shutdown, then kill process if it doesn't shutdown.

    This does not send shutdown requests - use :meth:`request_shutdown`
    first.

    Parameters
    ----------
    waittime : float, optional
        Total number of seconds to wait before force-killing.
    pollinterval : float, optional
        Seconds between liveness checks.
    """
    polls = int(waittime / pollinterval)
    for _ in range(polls):
        if not self.is_alive():
            # Kernel exited on its own; nothing left to do.
            return
        time.sleep(pollinterval)
    # Waited long enough — force-kill whatever is still running.
    if self.has_kernel:
        self._kill_kernel()
|
269 | 269 | |
|
def cleanup(self, connection_file=True):
    """Clean up resources when the kernel is shut down.

    Parameters
    ----------
    connection_file : bool, optional
        Also remove the connection file (skipped during restarts so the
        same ports can be reused).
    """
    if connection_file:
        self.cleanup_connection_file()
    self.cleanup_ipc_files()
    self._close_control_socket()
|
277 | 277 | |
|
def shutdown_kernel(self, now=False, restart=False):
    """Attempts to stop the kernel process cleanly.

    This attempts to shutdown the kernels cleanly by:

    1. Sending it a shutdown message over the control channel.
    2. If that fails, the kernel is shutdown forcibly by sending it
       a signal.

    Parameters
    ----------
    now : bool
        Should the kernel be forcibly killed *now*. This skips the
        first, nice shutdown attempt.
    restart: bool
        Will this kernel be restarted after it is shutdown. When this
        is True, connection files will not be cleaned up.
    """
    # Pause the restarter so a deliberate shutdown isn't treated as a crash.
    self.stop_restarter()

    if now:
        # Skip the polite shutdown entirely.
        self._kill_kernel()
    else:
        self.request_shutdown(restart=restart)
        # Give the kernel up to ~1s (polling every 0.1s) to run its own
        # shutdown actions before resorting to killing it.
        self.finish_shutdown()

    # Keep the connection file around when we're about to restart.
    self.cleanup(connection_file=not restart)
|
309 | 309 | |
|
def restart_kernel(self, now=False, **kw):
    """Restarts a kernel with the arguments that were used to launch it.

    If the old kernel was launched with random ports, the same ports will be
    used for the new kernel. The same connection file is used again.

    Parameters
    ----------
    now : bool, optional
        If True, the kernel is forcefully restarted *immediately*, without
        having a chance to do any cleanup action.  Otherwise the kernel is
        given 1s to clean up before a forceful restart is issued.

        In all cases the kernel is restarted, the only difference is whether
        it is given a chance to perform a clean shutdown or not.

    **kw : optional
        Any options specified here will overwrite those used to launch the
        kernel.

    Raises
    ------
    RuntimeError
        If ``start_kernel`` was never called on this manager.
    """
    # Guard clause: nothing to restart without a prior launch.
    if self._launch_args is None:
        raise RuntimeError("Cannot restart the kernel. "
                           "No previous call to 'start_kernel'.")

    # Stop currently running kernel, preserving the connection file.
    self.shutdown_kernel(now=now, restart=True)

    # Start new kernel with the saved args, overridden by any new ones.
    self._launch_args.update(kw)
    self.start_kernel(**self._launch_args)
|
340 | 340 | |
|
@property
def has_kernel(self):
    """True when a kernel subprocess has been started and is being managed."""
    return self.kernel is not None
|
345 | 345 | |
|
def _kill_kernel(self):
    """Kill the running kernel.

    This is a private method, callers should use shutdown_kernel(now=True).

    Raises
    ------
    RuntimeError
        If no kernel is currently running.
    """
    if not self.has_kernel:
        raise RuntimeError("Cannot kill kernel. No kernel is running!")

    # Signal the kernel to terminate (sends SIGKILL on Unix and calls
    # TerminateProcess() on Win32).
    try:
        self.kernel.kill()
    except OSError as e:
        if sys.platform == 'win32':
            # On Windows, Access Denied (winerror 5) means the process
            # has already terminated. Ignore it; re-raise anything else.
            if e.winerror != 5:
                raise
        else:
            # On Unix, ESRCH means the process has already terminated.
            # Ignore it; re-raise anything else.
            from errno import ESRCH
            if e.errno != ESRCH:
                raise

    # Block until the kernel terminates, then forget it.
    self.kernel.wait()
    self.kernel = None
|
375 | 375 | |
|
def interrupt_kernel(self):
    """Interrupts the kernel by sending it a signal.

    Unlike ``signal_kernel``, this operation is well supported on all
    platforms.

    Raises
    ------
    RuntimeError
        If no kernel is currently running.
    """
    if not self.has_kernel:
        raise RuntimeError("Cannot interrupt kernel. No kernel is running!")
    if sys.platform == 'win32':
        # Windows has no SIGINT delivery to subprocesses; use the
        # dedicated interrupt event instead.
        from .win_interrupt import send_interrupt
        send_interrupt(self.kernel.win32_interrupt_event)
    else:
        self.kernel.send_signal(signal.SIGINT)
|
390 | 390 | |
|
def signal_kernel(self, signum):
    """Sends a signal to the kernel.

    Note that since only SIGTERM is supported on Windows, this function is
    only useful on Unix systems.

    Parameters
    ----------
    signum : int
        The signal number to deliver to the kernel process.

    Raises
    ------
    RuntimeError
        If no kernel is currently running.
    """
    if not self.has_kernel:
        raise RuntimeError("Cannot signal kernel. No kernel is running!")
    self.kernel.send_signal(signum)
|
401 | 401 | |
|
def is_alive(self):
    """Is the kernel process still running?"""
    if not self.has_kernel:
        # No kernel has been started (or it was already reaped).
        return False
    # Popen.poll() returns None while the process is still running.
    return self.kernel.poll() is None
|
412 | 412 | |
|
413 | 413 | |
|
# Declare KernelManager a virtual subclass of the KernelManagerABC
# interface, so isinstance/issubclass checks against the ABC succeed.
KernelManagerABC.register(KernelManager)
|
415 | 415 | |
|
416 | 416 | |
|
def start_new_kernel(startup_timeout=60, kernel_name='python', **kwargs):
    """Start a new kernel, and return its Manager and Client.

    Parameters
    ----------
    startup_timeout : float, optional
        Seconds to wait for the kernel to become ready before giving up.
    kernel_name : str, optional
        Name of the kernel spec to launch.
    **kwargs : optional
        Extra keyword arguments passed through to ``KernelManager.start_kernel``.

    Returns
    -------
    (KernelManager, KernelClient)
    """
    km = KernelManager(kernel_name=kernel_name)
    km.start_kernel(**kwargs)
    kc = km.client()
    kc.start_channels()
    try:
        # BUG FIX: startup_timeout was previously accepted but never used;
        # pass it through so callers actually control how long we wait.
        kc.wait_for_ready(timeout=startup_timeout)
    except RuntimeError:
        # Don't leak the subprocess if the kernel never became ready.
        kc.stop_channels()
        km.shutdown_kernel()
        raise

    return km, kc
|
426 | 426 | |
|
@contextmanager
def run_kernel(**kwargs):
    """Context manager to create a kernel in a subprocess.

    The kernel is shut down when the context exits.

    Parameters
    ----------
    **kwargs : optional
        Passed through to :func:`start_new_kernel`.

    Returns
    -------
    kernel_client: connected KernelClient instance
    """
    km, kc = start_new_kernel(**kwargs)
    try:
        yield kc
    finally:
        # Always tear down channels and the kernel process, even if the
        # body of the with-block raised.
        kc.stop_channels()
        km.shutdown_kernel(now=True)
|
1 | NO CONTENT: file renamed from IPython/utils/tests/test_localinterfaces.py to jupyter_client/tests/test_localinterfaces.py |
@@ -1,86 +1,86 b'' | |||
|
1 | 1 | """Tests for the notebook kernel and session manager.""" |
|
2 | 2 | |
|
3 | 3 | from subprocess import PIPE |
|
4 | 4 | import time |
|
5 | 5 | from unittest import TestCase |
|
6 | 6 | |
|
7 | 7 | from IPython.testing import decorators as dec |
|
8 | 8 | |
|
9 | 9 | from IPython.config.loader import Config |
|
10 |
from |
|
|
10 | from ..localinterfaces import localhost | |
|
11 | 11 | from jupyter_client import KernelManager |
|
12 | 12 | from jupyter_client.multikernelmanager import MultiKernelManager |
|
13 | 13 | |
|
class TestKernelManager(TestCase):
    """Lifecycle and connection-info tests for MultiKernelManager,
    over both the tcp and ipc transports."""

    def _get_tcp_km(self):
        # Default configuration uses the tcp transport with random ports.
        c = Config()
        km = MultiKernelManager(config=c)
        return km

    def _get_ipc_km(self):
        # Configure ipc transport; 'test' becomes the socket-file prefix.
        c = Config()
        c.KernelManager.transport = 'ipc'
        c.KernelManager.ip = 'test'
        km = MultiKernelManager(config=c)
        return km

    def _run_lifecycle(self, km):
        # Full lifecycle: start -> alive/listed -> restart -> interrupt
        # -> shutdown, asserting bookkeeping at each step.
        kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
        self.assertTrue(km.is_alive(kid))
        self.assertTrue(kid in km)
        self.assertTrue(kid in km.list_kernel_ids())
        self.assertEqual(len(km),1)
        km.restart_kernel(kid, now=True)
        self.assertTrue(km.is_alive(kid))
        self.assertTrue(kid in km.list_kernel_ids())
        km.interrupt_kernel(kid)
        k = km.get_kernel(kid)
        self.assertTrue(isinstance(k, KernelManager))
        km.shutdown_kernel(kid, now=True)
        self.assertTrue(not kid in km)

    def _run_cinfo(self, km, transport, ip):
        # Check the connection-info dict has the expected transport, ip,
        # and per-channel ports, and that each channel can connect.
        kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
        k = km.get_kernel(kid)
        cinfo = km.get_connection_info(kid)
        self.assertEqual(transport, cinfo['transport'])
        self.assertEqual(ip, cinfo['ip'])
        self.assertTrue('stdin_port' in cinfo)
        self.assertTrue('iopub_port' in cinfo)
        stream = km.connect_iopub(kid)
        stream.close()
        self.assertTrue('shell_port' in cinfo)
        stream = km.connect_shell(kid)
        stream.close()
        self.assertTrue('hb_port' in cinfo)
        stream = km.connect_hb(kid)
        stream.close()
        km.shutdown_kernel(kid, now=True)

    def test_tcp_lifecycle(self):
        km = self._get_tcp_km()
        self._run_lifecycle(km)

    def test_shutdown_all(self):
        km = self._get_tcp_km()
        kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
        self.assertIn(kid, km)
        km.shutdown_all()
        self.assertNotIn(kid, km)
        # shutdown again is okay, because we have no kernels
        km.shutdown_all()

    def test_tcp_cinfo(self):
        km = self._get_tcp_km()
        self._run_cinfo(km, 'tcp', localhost())

    @dec.skip_win32
    def test_ipc_lifecycle(self):
        # ipc sockets are files; not supported on Windows.
        km = self._get_ipc_km()
        self._run_lifecycle(km)

    @dec.skip_win32
    def test_ipc_cinfo(self):
        km = self._get_ipc_km()
        self._run_cinfo(km, 'ipc', 'test')
General Comments 0
You need to be logged in to leave comments.
Login now