@@ -1,25 +1,26 @@
|
1 | 1 | MANIFEST |
|
2 | 2 | build |
|
3 | 3 | dist |
|
4 | 4 | _build |
|
5 | 5 | docs/man/*.gz |
|
6 | 6 | docs/source/api/generated |
|
7 | 7 | docs/source/config/options |
|
8 | 8 | docs/source/config/shortcuts/*.csv |
|
9 | 9 | docs/source/interactive/magics-generated.txt |
|
10 | 10 | docs/source/config/shortcuts/*.csv |
|
11 | 11 | docs/gh-pages |
|
12 | 12 | jupyter_notebook/notebook/static/mathjax |
|
13 | 13 | jupyter_notebook/static/style/*.map |
|
14 | 14 | *.py[co] |
|
15 | 15 | __pycache__ |
|
16 | 16 | *.egg-info |
|
17 | 17 | *~ |
|
18 | 18 | *.bak |
|
19 | 19 | .ipynb_checkpoints |
|
20 | 20 | .tox |
|
21 | 21 | .DS_Store |
|
22 | 22 | \#*# |
|
23 | 23 | .#* |
|
24 | .cache | |
|
24 | 25 | .coverage |
|
25 | 26 | *.swp |
@@ -1,279 +1,279 @@
|
1 | 1 | """Magic functions for running cells in various scripts.""" |
|
2 | 2 | |
|
3 | 3 | # Copyright (c) IPython Development Team. |
|
4 | 4 | # Distributed under the terms of the Modified BSD License. |
|
5 | 5 | |
|
6 | 6 | import errno |
|
7 | 7 | import os |
|
8 | 8 | import sys |
|
9 | 9 | import signal |
|
10 | 10 | import time |
|
11 | 11 | from subprocess import Popen, PIPE |
|
12 | 12 | import atexit |
|
13 | 13 | |
|
14 | 14 | from IPython.core import magic_arguments |
|
15 | 15 | from IPython.core.magic import ( |
|
16 | 16 | Magics, magics_class, line_magic, cell_magic |
|
17 | 17 | ) |
|
18 | 18 | from IPython.lib.backgroundjobs import BackgroundJobManager |
|
19 | 19 | from IPython.utils import py3compat |
|
20 | 20 | from IPython.utils.process import arg_split |
|
21 | 21 | from traitlets import List, Dict, default |
|
22 | 22 | |
|
23 | 23 | #----------------------------------------------------------------------------- |
|
24 | 24 | # Magic implementation classes |
|
25 | 25 | #----------------------------------------------------------------------------- |
|
26 | 26 | |
|
27 | 27 | def script_args(f): |
|
28 | 28 | """single decorator for adding script args""" |
|
29 | 29 | args = [ |
|
30 | 30 | magic_arguments.argument( |
|
31 | 31 | '--out', type=str, |
|
32 | 32 | help="""The variable in which to store stdout from the script. |
|
33 | 33 | If the script is backgrounded, this will be the stdout *pipe*, |
|
34 | 34 | instead of the stdout text itself. |
|
35 | 35 | """ |
|
36 | 36 | ), |
|
37 | 37 | magic_arguments.argument( |
|
38 | 38 | '--err', type=str, |
|
39 | 39 | help="""The variable in which to store stderr from the script. |
|
40 | 40 | If the script is backgrounded, this will be the stderr *pipe*, |
|
41 | 41 | instead of the stderr text itself. |
|
42 | 42 | """ |
|
43 | 43 | ), |
|
44 | 44 | magic_arguments.argument( |
|
45 | 45 | '--bg', action="store_true", |
|
46 | 46 | help="""Whether to run the script in the background. |
|
47 | 47 | If given, the only way to see the output of the command is |
|
48 | 48 | with --out/err. |
|
49 | 49 | """ |
|
50 | 50 | ), |
|
51 | 51 | magic_arguments.argument( |
|
52 | 52 | '--proc', type=str, |
|
53 | 53 | help="""The variable in which to store Popen instance. |
|
54 | 54 | This is used only when --bg option is given. |
|
55 | 55 | """ |
|
56 | 56 | ), |
|
57 | 57 | ] |
|
58 | 58 | for arg in args: |
|
59 | 59 | f = arg(f) |
|
60 | 60 | return f |
|
61 | 61 | |
|
62 | 62 | @magics_class |
|
63 | 63 | class ScriptMagics(Magics): |
|
64 | 64 | """Magics for talking to scripts |
|
65 | 65 | |
|
66 | 66 | This defines a base `%%script` cell magic for running a cell |
|
67 | 67 | with a program in a subprocess, and registers a few top-level |
|
68 | 68 | magics that call %%script with common interpreters. |
|
69 | 69 | """ |
|
70 | 70 | script_magics = List( |
|
71 | 71 | help="""Extra script cell magics to define |
|
72 | 72 | |
|
73 | 73 | This generates simple wrappers of `%%script foo` as `%%foo`. |
|
74 | 74 | |
|
75 | 75 | If you want to add script magics that aren't on your path, |
|
76 | 76 | specify them in script_paths |
|
77 | 77 | """, |
|
78 | 78 | ).tag(config=True) |
|
79 | 79 | @default('script_magics') |
|
80 | 80 | def _script_magics_default(self): |
|
81 | 81 | """default to a common list of programs""" |
|
82 | 82 | |
|
83 | 83 | defaults = [ |
|
84 | 84 | 'sh', |
|
85 | 85 | 'bash', |
|
86 | 86 | 'perl', |
|
87 | 87 | 'ruby', |
|
88 | 88 | 'python', |
|
89 | 89 | 'python2', |
|
90 | 90 | 'python3', |
|
91 | 91 | 'pypy', |
|
92 | 92 | ] |
|
93 | 93 | if os.name == 'nt': |
|
94 | 94 | defaults.extend([ |
|
95 | 95 | 'cmd', |
|
96 | 96 | ]) |
|
97 | 97 | |
|
98 | 98 | return defaults |
|
99 | 99 | |
|
100 | 100 | script_paths = Dict( |
|
101 | 101 | help="""Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby' |
|
102 | 102 | |
|
103 | 103 | Only necessary for items in script_magics where the default path will not |
|
104 | 104 | find the right interpreter. |
|
105 | 105 | """ |
|
106 | 106 | ).tag(config=True) |
|
107 | 107 | |
|
108 | 108 | def __init__(self, shell=None): |
|
109 | 109 | super(ScriptMagics, self).__init__(shell=shell) |
|
110 | 110 | self._generate_script_magics() |
|
111 | 111 | self.job_manager = BackgroundJobManager() |
|
112 | 112 | self.bg_processes = [] |
|
113 | 113 | atexit.register(self.kill_bg_processes) |
|
114 | 114 | |
|
115 | 115 | def __del__(self): |
|
116 | 116 | self.kill_bg_processes() |
|
117 | 117 | |
|
118 | 118 | def _generate_script_magics(self): |
|
119 | 119 | cell_magics = self.magics['cell'] |
|
120 | 120 | for name in self.script_magics: |
|
121 | 121 | cell_magics[name] = self._make_script_magic(name) |
|
122 | 122 | |
|
123 | 123 | def _make_script_magic(self, name): |
|
124 | 124 | """make a named magic, that calls %%script with a particular program""" |
|
125 | 125 | # expand to explicit path if necessary: |
|
126 | 126 | script = self.script_paths.get(name, name) |
|
127 | 127 | |
|
128 | 128 | @magic_arguments.magic_arguments() |
|
129 | 129 | @script_args |
|
130 | 130 | def named_script_magic(line, cell): |
|
131 | 131 | # if line, add it as cl-flags |
|
132 | 132 | if line: |
|
133 | 133 | line = "%s %s" % (script, line) |
|
134 | 134 | else: |
|
135 | 135 | line = script |
|
136 | 136 | return self.shebang(line, cell) |
|
137 | 137 | |
|
138 | 138 | # write a basic docstring: |
|
139 | 139 | named_script_magic.__doc__ = \ |
|
140 | 140 | """%%{name} script magic |
|
141 | 141 | |
|
142 | 142 | Run cells with {script} in a subprocess. |
|
143 | 143 | |
|
144 | 144 | This is a shortcut for `%%script {script}` |
|
145 | 145 | """.format(**locals()) |
|
146 | 146 | |
|
147 | 147 | return named_script_magic |
|
148 | 148 | |
|
149 | 149 | @magic_arguments.magic_arguments() |
|
150 | 150 | @script_args |
|
151 | 151 | @cell_magic("script") |
|
152 | 152 | def shebang(self, line, cell): |
|
153 | 153 | """Run a cell via a shell command |
|
154 | 154 | |
|
155 | 155 | The `%%script` line is like the #! line of script, |
|
156 | 156 | specifying a program (bash, perl, ruby, etc.) with which to run. |
|
157 | 157 | |
|
158 | 158 | The rest of the cell is run by that program. |
|
159 | 159 | |
|
160 | 160 | Examples |
|
161 | 161 | -------- |
|
162 | 162 | :: |
|
163 | 163 | |
|
164 | 164 | In [1]: %%script bash |
|
165 | 165 | ...: for i in 1 2 3; do |
|
166 | 166 | ...: echo $i |
|
167 | 167 | ...: done |
|
168 | 168 | 1 |
|
169 | 169 | 2 |
|
170 | 170 | 3 |
|
171 | 171 | """ |
|
172 | 172 | argv = arg_split(line, posix = not sys.platform.startswith('win')) |
|
173 | 173 | args, cmd = self.shebang.parser.parse_known_args(argv) |
|
174 | 174 | |
|
175 | 175 | try: |
|
176 | 176 | p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE) |
|
177 | 177 | except OSError as e: |
|
178 | 178 | if e.errno == errno.ENOENT: |
|
179 | 179 | print("Couldn't find program: %r" % cmd[0]) |
|
180 | 180 | return |
|
181 | 181 | else: |
|
182 | 182 | raise |
|
183 | 183 | |
|
184 | 184 | if not cell.endswith('\n'): |
|
185 | 185 | cell += '\n' |
|
186 | 186 | cell = cell.encode('utf8', 'replace') |
|
187 | 187 | if args.bg: |
|
188 | 188 | self.bg_processes.append(p) |
|
189 | 189 | self._gc_bg_processes() |
|
190 | 190 | if args.out: |
|
191 | 191 | self.shell.user_ns[args.out] = p.stdout |
|
192 | 192 | if args.err: |
|
193 | 193 | self.shell.user_ns[args.err] = p.stderr |
|
194 | 194 | self.job_manager.new(self._run_script, p, cell, daemon=True) |
|
195 | 195 | if args.proc: |
|
196 | 196 | self.shell.user_ns[args.proc] = p |
|
197 | 197 | return |
|
198 | 198 | |
|
199 | 199 | try: |
|
200 | 200 | out, err = p.communicate(cell) |
|
201 | 201 | except KeyboardInterrupt: |
|
202 | 202 | try: |
|
203 | 203 | p.send_signal(signal.SIGINT) |
|
204 | 204 | time.sleep(0.1) |
|
205 | 205 | if p.poll() is not None: |
|
206 | 206 | print("Process is interrupted.") |
|
207 | 207 | return |
|
208 | 208 | p.terminate() |
|
209 | 209 | time.sleep(0.1) |
|
210 | 210 | if p.poll() is not None: |
|
211 | 211 | print("Process is terminated.") |
|
212 | 212 | return |
|
213 | 213 | p.kill() |
|
214 | 214 | print("Process is killed.") |
|
215 | 215 | except OSError: |
|
216 | 216 | pass |
|
217 | 217 | except Exception as e: |
|
218 | 218 | print("Error while terminating subprocess (pid=%i): %s" \ |
|
219 | 219 | % (p.pid, e)) |
|
220 | 220 | return |
|
221 | out = py3compat. |
|
222 | err = py3compat. |
|
221 | out = py3compat.decode(out) | |
|
222 | err = py3compat.decode(err) | |
|
223 | 223 | if args.out: |
|
224 | 224 | self.shell.user_ns[args.out] = out |
|
225 | 225 | else: |
|
226 | 226 | sys.stdout.write(out) |
|
227 | 227 | sys.stdout.flush() |
|
228 | 228 | if args.err: |
|
229 | 229 | self.shell.user_ns[args.err] = err |
|
230 | 230 | else: |
|
231 | 231 | sys.stderr.write(err) |
|
232 | 232 | sys.stderr.flush() |
|
233 | 233 | |
|
234 | 234 | def _run_script(self, p, cell): |
|
235 | 235 | """callback for running the script in the background""" |
|
236 | 236 | p.stdin.write(cell) |
|
237 | 237 | p.stdin.close() |
|
238 | 238 | p.wait() |
|
239 | 239 | |
|
240 | 240 | @line_magic("killbgscripts") |
|
241 | 241 | def killbgscripts(self, _nouse_=''): |
|
242 | 242 | """Kill all BG processes started by %%script and its family.""" |
|
243 | 243 | self.kill_bg_processes() |
|
244 | 244 | print("All background processes were killed.") |
|
245 | 245 | |
|
246 | 246 | def kill_bg_processes(self): |
|
247 | 247 | """Kill all BG processes which are still running.""" |
|
248 | 248 | if not self.bg_processes: |
|
249 | 249 | return |
|
250 | 250 | for p in self.bg_processes: |
|
251 | 251 | if p.poll() is None: |
|
252 | 252 | try: |
|
253 | 253 | p.send_signal(signal.SIGINT) |
|
254 | 254 | except: |
|
255 | 255 | pass |
|
256 | 256 | time.sleep(0.1) |
|
257 | 257 | self._gc_bg_processes() |
|
258 | 258 | if not self.bg_processes: |
|
259 | 259 | return |
|
260 | 260 | for p in self.bg_processes: |
|
261 | 261 | if p.poll() is None: |
|
262 | 262 | try: |
|
263 | 263 | p.terminate() |
|
264 | 264 | except: |
|
265 | 265 | pass |
|
266 | 266 | time.sleep(0.1) |
|
267 | 267 | self._gc_bg_processes() |
|
268 | 268 | if not self.bg_processes: |
|
269 | 269 | return |
|
270 | 270 | for p in self.bg_processes: |
|
271 | 271 | if p.poll() is None: |
|
272 | 272 | try: |
|
273 | 273 | p.kill() |
|
274 | 274 | except: |
|
275 | 275 | pass |
|
276 | 276 | self._gc_bg_processes() |
|
277 | 277 | |
|
278 | 278 | def _gc_bg_processes(self): |
|
279 | 279 | self.bg_processes = [p for p in self.bg_processes if p.poll() is None] |
@@ -1,74 +1,73 @@
|
1 | 1 | # coding: utf-8 |
|
2 | 2 | """Tests for IPython.core.application""" |
|
3 | 3 | |
|
4 | 4 | import os |
|
5 | 5 | import tempfile |
|
6 | 6 | |
|
7 | 7 | import nose.tools as nt |
|
8 | 8 | |
|
9 | 9 | from traitlets import Unicode |
|
10 | 10 | |
|
11 | 11 | from IPython.core.application import BaseIPythonApplication |
|
12 | 12 | from IPython.testing import decorators as dec |
|
13 | from IPython.utils import py3compat | |
|
14 | 13 | from IPython.utils.tempdir import TemporaryDirectory |
|
15 | 14 | |
|
16 | 15 | |
|
17 | 16 | @dec.onlyif_unicode_paths |
|
18 | 17 | def test_unicode_cwd(): |
|
19 | 18 | """Check that IPython starts with non-ascii characters in the path.""" |
|
20 | 19 | wd = tempfile.mkdtemp(suffix=u"€") |
|
21 | 20 | |
|
22 | 21 | old_wd = os.getcwd() |
|
23 | 22 | os.chdir(wd) |
|
24 | 23 | #raise Exception(repr(os.getcwd())) |
|
25 | 24 | try: |
|
26 | 25 | app = BaseIPythonApplication() |
|
27 | 26 | # The lines below are copied from Application.initialize() |
|
28 | 27 | app.init_profile_dir() |
|
29 | 28 | app.init_config_files() |
|
30 | 29 | app.load_config_file(suppress_errors=False) |
|
31 | 30 | finally: |
|
32 | 31 | os.chdir(old_wd) |
|
33 | 32 | |
|
34 | 33 | @dec.onlyif_unicode_paths |
|
35 | 34 | def test_unicode_ipdir(): |
|
36 | 35 | """Check that IPython starts with non-ascii characters in the IP dir.""" |
|
37 | 36 | ipdir = tempfile.mkdtemp(suffix=u"€") |
|
38 | 37 | |
|
39 | 38 | # Create the config file, so it tries to load it. |
|
40 | 39 | with open(os.path.join(ipdir, 'ipython_config.py'), "w") as f: |
|
41 | 40 | pass |
|
42 | 41 | |
|
43 | 42 | old_ipdir1 = os.environ.pop("IPYTHONDIR", None) |
|
44 | 43 | old_ipdir2 = os.environ.pop("IPYTHON_DIR", None) |
|
45 | 44 | os.environ["IPYTHONDIR"] = ipdir |
|
46 | 45 | try: |
|
47 | 46 | app = BaseIPythonApplication() |
|
48 | 47 | # The lines below are copied from Application.initialize() |
|
49 | 48 | app.init_profile_dir() |
|
50 | 49 | app.init_config_files() |
|
51 | 50 | app.load_config_file(suppress_errors=False) |
|
52 | 51 | finally: |
|
53 | 52 | if old_ipdir1: |
|
54 | 53 | os.environ["IPYTHONDIR"] = old_ipdir1 |
|
55 | 54 | if old_ipdir2: |
|
56 | 55 | os.environ["IPYTHONDIR"] = old_ipdir2 |
|
57 | 56 | |
|
58 | 57 | def test_cli_priority(): |
|
59 | 58 | with TemporaryDirectory() as td: |
|
60 | 59 | |
|
61 | 60 | class TestApp(BaseIPythonApplication): |
|
62 | 61 | test = Unicode().tag(config=True) |
|
63 | 62 | |
|
64 | 63 | # Create the config file, so it tries to load it. |
|
65 | 64 | with open(os.path.join(td, 'ipython_config.py'), "w") as f: |
|
66 | 65 | f.write("c.TestApp.test = 'config file'") |
|
67 | 66 | |
|
68 | 67 | app = TestApp() |
|
69 | 68 | app.initialize(['--profile-dir', td]) |
|
70 | 69 | nt.assert_equal(app.test, 'config file') |
|
71 | 70 | app = TestApp() |
|
72 | 71 | app.initialize(['--profile-dir', td, '--TestApp.test=cli']) |
|
73 | 72 | nt.assert_equal(app.test, 'cli') |
|
74 | 73 |
@@ -1,74 +1,73 @@
|
1 | 1 | # coding: utf-8 |
|
2 | 2 | """Tests for the compilerop module. |
|
3 | 3 | """ |
|
4 | 4 | #----------------------------------------------------------------------------- |
|
5 | 5 | # Copyright (C) 2010-2011 The IPython Development Team. |
|
6 | 6 | # |
|
7 | 7 | # Distributed under the terms of the BSD License. |
|
8 | 8 | # |
|
9 | 9 | # The full license is in the file COPYING.txt, distributed with this software. |
|
10 | 10 | #----------------------------------------------------------------------------- |
|
11 | 11 | |
|
12 | 12 | #----------------------------------------------------------------------------- |
|
13 | 13 | # Imports |
|
14 | 14 | #----------------------------------------------------------------------------- |
|
15 | 15 | |
|
16 | 16 | # Stdlib imports |
|
17 | 17 | import linecache |
|
18 | 18 | import sys |
|
19 | 19 | |
|
20 | 20 | # Third-party imports |
|
21 | 21 | import nose.tools as nt |
|
22 | 22 | |
|
23 | 23 | # Our own imports |
|
24 | 24 | from IPython.core import compilerop |
|
25 | from IPython.utils import py3compat | |
|
26 | 25 | |
|
27 | 26 | #----------------------------------------------------------------------------- |
|
28 | 27 | # Test functions |
|
29 | 28 | #----------------------------------------------------------------------------- |
|
30 | 29 | |
|
31 | 30 | def test_code_name(): |
|
32 | 31 | code = 'x=1' |
|
33 | 32 | name = compilerop.code_name(code) |
|
34 | 33 | nt.assert_true(name.startswith('<ipython-input-0')) |
|
35 | 34 | |
|
36 | 35 | |
|
37 | 36 | def test_code_name2(): |
|
38 | 37 | code = 'x=1' |
|
39 | 38 | name = compilerop.code_name(code, 9) |
|
40 | 39 | nt.assert_true(name.startswith('<ipython-input-9')) |
|
41 | 40 | |
|
42 | 41 | |
|
43 | 42 | def test_cache(): |
|
44 | 43 | """Test the compiler correctly compiles and caches inputs |
|
45 | 44 | """ |
|
46 | 45 | cp = compilerop.CachingCompiler() |
|
47 | 46 | ncache = len(linecache.cache) |
|
48 | 47 | cp.cache('x=1') |
|
49 | 48 | nt.assert_true(len(linecache.cache) > ncache) |
|
50 | 49 | |
|
51 | 50 | def setUp(): |
|
52 | 51 | # Check we're in a proper Python 2 environment (some imports, such |
|
53 | 52 | # as GTK, can change the default encoding, which can hide bugs.) |
|
54 | 53 | nt.assert_equal(sys.getdefaultencoding(), "utf-8") |
|
55 | 54 | |
|
56 | 55 | def test_cache_unicode(): |
|
57 | 56 | cp = compilerop.CachingCompiler() |
|
58 | 57 | ncache = len(linecache.cache) |
|
59 | 58 | cp.cache(u"t = 'žćčšđ'") |
|
60 | 59 | nt.assert_true(len(linecache.cache) > ncache) |
|
61 | 60 | |
|
62 | 61 | def test_compiler_check_cache(): |
|
63 | 62 | """Test the compiler properly manages the cache. |
|
64 | 63 | """ |
|
65 | 64 | # Rather simple-minded tests that just exercise the API |
|
66 | 65 | cp = compilerop.CachingCompiler() |
|
67 | 66 | cp.cache('x=1', 99) |
|
68 | 67 | # Ensure now that after clearing the cache, our entries survive |
|
69 | 68 | linecache.checkcache() |
|
70 | 69 | for k in linecache.cache: |
|
71 | 70 | if k.startswith('<ipython-input-99'): |
|
72 | 71 | break |
|
73 | 72 | else: |
|
74 | 73 | raise AssertionError('Entry for input-99 missing from linecache') |
@@ -1,162 +1,161 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """Tests for completerlib. |
|
3 | 3 | |
|
4 | 4 | """ |
|
5 | 5 | |
|
6 | 6 | #----------------------------------------------------------------------------- |
|
7 | 7 | # Imports |
|
8 | 8 | #----------------------------------------------------------------------------- |
|
9 | 9 | |
|
10 | 10 | import os |
|
11 | 11 | import shutil |
|
12 | 12 | import sys |
|
13 | 13 | import tempfile |
|
14 | 14 | import unittest |
|
15 | 15 | from os.path import join |
|
16 | 16 | |
|
17 | 17 | import nose.tools as nt |
|
18 | 18 | |
|
19 | 19 | from IPython.core.completerlib import magic_run_completer, module_completion |
|
20 | from IPython.utils import py3compat | |
|
21 | 20 | from IPython.utils.tempdir import TemporaryDirectory |
|
22 | 21 | from IPython.testing.decorators import onlyif_unicode_paths |
|
23 | 22 | |
|
24 | 23 | |
|
25 | 24 | class MockEvent(object): |
|
26 | 25 | def __init__(self, line): |
|
27 | 26 | self.line = line |
|
28 | 27 | |
|
29 | 28 | #----------------------------------------------------------------------------- |
|
30 | 29 | # Test functions begin |
|
31 | 30 | #----------------------------------------------------------------------------- |
|
32 | 31 | class Test_magic_run_completer(unittest.TestCase): |
|
33 | 32 | files = [u"aao.py", u"a.py", u"b.py", u"aao.txt"] |
|
34 | 33 | dirs = [u"adir/", "bdir/"] |
|
35 | 34 | |
|
36 | 35 | def setUp(self): |
|
37 | 36 | self.BASETESTDIR = tempfile.mkdtemp() |
|
38 | 37 | for fil in self.files: |
|
39 | 38 | with open(join(self.BASETESTDIR, fil), "w") as sfile: |
|
40 | 39 | sfile.write("pass\n") |
|
41 | 40 | for d in self.dirs: |
|
42 | 41 | os.mkdir(join(self.BASETESTDIR, d)) |
|
43 | 42 | |
|
44 | 43 | self.oldpath = os.getcwd() |
|
45 | 44 | os.chdir(self.BASETESTDIR) |
|
46 | 45 | |
|
47 | 46 | def tearDown(self): |
|
48 | 47 | os.chdir(self.oldpath) |
|
49 | 48 | shutil.rmtree(self.BASETESTDIR) |
|
50 | 49 | |
|
51 | 50 | def test_1(self): |
|
52 | 51 | """Test magic_run_completer, should match two alterntives |
|
53 | 52 | """ |
|
54 | 53 | event = MockEvent(u"%run a") |
|
55 | 54 | mockself = None |
|
56 | 55 | match = set(magic_run_completer(mockself, event)) |
|
57 | 56 | self.assertEqual(match, {u"a.py", u"aao.py", u"adir/"}) |
|
58 | 57 | |
|
59 | 58 | def test_2(self): |
|
60 | 59 | """Test magic_run_completer, should match one alterntive |
|
61 | 60 | """ |
|
62 | 61 | event = MockEvent(u"%run aa") |
|
63 | 62 | mockself = None |
|
64 | 63 | match = set(magic_run_completer(mockself, event)) |
|
65 | 64 | self.assertEqual(match, {u"aao.py"}) |
|
66 | 65 | |
|
67 | 66 | def test_3(self): |
|
68 | 67 | """Test magic_run_completer with unterminated " """ |
|
69 | 68 | event = MockEvent(u'%run "a') |
|
70 | 69 | mockself = None |
|
71 | 70 | match = set(magic_run_completer(mockself, event)) |
|
72 | 71 | self.assertEqual(match, {u"a.py", u"aao.py", u"adir/"}) |
|
73 | 72 | |
|
74 | 73 | def test_completion_more_args(self): |
|
75 | 74 | event = MockEvent(u'%run a.py ') |
|
76 | 75 | match = set(magic_run_completer(None, event)) |
|
77 | 76 | self.assertEqual(match, set(self.files + self.dirs)) |
|
78 | 77 | |
|
79 | 78 | def test_completion_in_dir(self): |
|
80 | 79 | # Github issue #3459 |
|
81 | 80 | event = MockEvent(u'%run a.py {}'.format(join(self.BASETESTDIR, 'a'))) |
|
82 | 81 | print(repr(event.line)) |
|
83 | 82 | match = set(magic_run_completer(None, event)) |
|
84 | 83 | # We specifically use replace here rather than normpath, because |
|
85 | 84 | # at one point there were duplicates 'adir' and 'adir/', and normpath |
|
86 | 85 | # would hide the failure for that. |
|
87 | 86 | self.assertEqual(match, {join(self.BASETESTDIR, f).replace('\\','/') |
|
88 | 87 | for f in (u'a.py', u'aao.py', u'aao.txt', u'adir/')}) |
|
89 | 88 | |
|
90 | 89 | class Test_magic_run_completer_nonascii(unittest.TestCase): |
|
91 | 90 | @onlyif_unicode_paths |
|
92 | 91 | def setUp(self): |
|
93 | 92 | self.BASETESTDIR = tempfile.mkdtemp() |
|
94 | 93 | for fil in [u"aaø.py", u"a.py", u"b.py"]: |
|
95 | 94 | with open(join(self.BASETESTDIR, fil), "w") as sfile: |
|
96 | 95 | sfile.write("pass\n") |
|
97 | 96 | self.oldpath = os.getcwd() |
|
98 | 97 | os.chdir(self.BASETESTDIR) |
|
99 | 98 | |
|
100 | 99 | def tearDown(self): |
|
101 | 100 | os.chdir(self.oldpath) |
|
102 | 101 | shutil.rmtree(self.BASETESTDIR) |
|
103 | 102 | |
|
104 | 103 | @onlyif_unicode_paths |
|
105 | 104 | def test_1(self): |
|
106 | 105 | """Test magic_run_completer, should match two alterntives |
|
107 | 106 | """ |
|
108 | 107 | event = MockEvent(u"%run a") |
|
109 | 108 | mockself = None |
|
110 | 109 | match = set(magic_run_completer(mockself, event)) |
|
111 | 110 | self.assertEqual(match, {u"a.py", u"aaø.py"}) |
|
112 | 111 | |
|
113 | 112 | @onlyif_unicode_paths |
|
114 | 113 | def test_2(self): |
|
115 | 114 | """Test magic_run_completer, should match one alterntive |
|
116 | 115 | """ |
|
117 | 116 | event = MockEvent(u"%run aa") |
|
118 | 117 | mockself = None |
|
119 | 118 | match = set(magic_run_completer(mockself, event)) |
|
120 | 119 | self.assertEqual(match, {u"aaø.py"}) |
|
121 | 120 | |
|
122 | 121 | @onlyif_unicode_paths |
|
123 | 122 | def test_3(self): |
|
124 | 123 | """Test magic_run_completer with unterminated " """ |
|
125 | 124 | event = MockEvent(u'%run "a') |
|
126 | 125 | mockself = None |
|
127 | 126 | match = set(magic_run_completer(mockself, event)) |
|
128 | 127 | self.assertEqual(match, {u"a.py", u"aaø.py"}) |
|
129 | 128 | |
|
130 | 129 | # module_completer: |
|
131 | 130 | |
|
132 | 131 | def test_import_invalid_module(): |
|
133 | 132 | """Testing of issue https://github.com/ipython/ipython/issues/1107""" |
|
134 | 133 | invalid_module_names = {'foo-bar', 'foo:bar', '10foo'} |
|
135 | 134 | valid_module_names = {'foobar'} |
|
136 | 135 | with TemporaryDirectory() as tmpdir: |
|
137 | 136 | sys.path.insert( 0, tmpdir ) |
|
138 | 137 | for name in invalid_module_names | valid_module_names: |
|
139 | 138 | filename = os.path.join(tmpdir, name + '.py') |
|
140 | 139 | open(filename, 'w').close() |
|
141 | 140 | |
|
142 | 141 | s = set( module_completion('import foo') ) |
|
143 | 142 | intersection = s.intersection(invalid_module_names) |
|
144 | 143 | nt.assert_equal(intersection, set()) |
|
145 | 144 | |
|
146 | 145 | assert valid_module_names.issubset(s), valid_module_names.intersection(s) |
|
147 | 146 | |
|
148 | 147 | |
|
149 | 148 | def test_bad_module_all(): |
|
150 | 149 | """Test module with invalid __all__ |
|
151 | 150 | |
|
152 | 151 | https://github.com/ipython/ipython/issues/9678 |
|
153 | 152 | """ |
|
154 | 153 | testsdir = os.path.dirname(__file__) |
|
155 | 154 | sys.path.insert(0, testsdir) |
|
156 | 155 | try: |
|
157 | 156 | results = module_completion('from bad_all import ') |
|
158 | 157 | nt.assert_in('puppies', results) |
|
159 | 158 | for r in results: |
|
160 | 159 | nt.assert_is_instance(r, str) |
|
161 | 160 | finally: |
|
162 | 161 | sys.path.remove(testsdir) |
@@ -1,211 +1,210 @@
|
1 | 1 | # coding: utf-8 |
|
2 | 2 | """Tests for the IPython tab-completion machinery. |
|
3 | 3 | """ |
|
4 | 4 | #----------------------------------------------------------------------------- |
|
5 | 5 | # Module imports |
|
6 | 6 | #----------------------------------------------------------------------------- |
|
7 | 7 | |
|
8 | 8 | # stdlib |
|
9 | 9 | import io |
|
10 | 10 | import os |
|
11 | 11 | import sys |
|
12 | 12 | import tempfile |
|
13 | 13 | from datetime import datetime |
|
14 | 14 | |
|
15 | 15 | # third party |
|
16 | 16 | import nose.tools as nt |
|
17 | 17 | |
|
18 | 18 | # our own packages |
|
19 | 19 | from traitlets.config.loader import Config |
|
20 | 20 | from IPython.utils.tempdir import TemporaryDirectory |
|
21 | 21 | from IPython.core.history import HistoryManager, extract_hist_ranges |
|
22 | from IPython.utils import py3compat | |
|
23 | 22 | |
|
24 | 23 | def setUp(): |
|
25 | 24 | nt.assert_equal(sys.getdefaultencoding(), "utf-8") |
|
26 | 25 | |
|
27 | 26 | def test_history(): |
|
28 | 27 | ip = get_ipython() |
|
29 | 28 | with TemporaryDirectory() as tmpdir: |
|
30 | 29 | hist_manager_ori = ip.history_manager |
|
31 | 30 | hist_file = os.path.join(tmpdir, 'history.sqlite') |
|
32 | 31 | try: |
|
33 | 32 | ip.history_manager = HistoryManager(shell=ip, hist_file=hist_file) |
|
34 | 33 | hist = [u'a=1', u'def f():\n test = 1\n return test', u"b='€Æ¾÷ß'"] |
|
35 | 34 | for i, h in enumerate(hist, start=1): |
|
36 | 35 | ip.history_manager.store_inputs(i, h) |
|
37 | 36 | |
|
38 | 37 | ip.history_manager.db_log_output = True |
|
39 | 38 | # Doesn't match the input, but we'll just check it's stored. |
|
40 | 39 | ip.history_manager.output_hist_reprs[3] = "spam" |
|
41 | 40 | ip.history_manager.store_output(3) |
|
42 | 41 | |
|
43 | 42 | nt.assert_equal(ip.history_manager.input_hist_raw, [''] + hist) |
|
44 | 43 | |
|
45 | 44 | # Detailed tests for _get_range_session |
|
46 | 45 | grs = ip.history_manager._get_range_session |
|
47 | 46 | nt.assert_equal(list(grs(start=2,stop=-1)), list(zip([0], [2], hist[1:-1]))) |
|
48 | 47 | nt.assert_equal(list(grs(start=-2)), list(zip([0,0], [2,3], hist[-2:]))) |
|
49 | 48 | nt.assert_equal(list(grs(output=True)), list(zip([0,0,0], [1,2,3], zip(hist, [None,None,'spam'])))) |
|
50 | 49 | |
|
51 | 50 | # Check whether specifying a range beyond the end of the current |
|
52 | 51 | # session results in an error (gh-804) |
|
53 | 52 | ip.magic('%hist 2-500') |
|
54 | 53 | |
|
55 | 54 | # Check that we can write non-ascii characters to a file |
|
56 | 55 | ip.magic("%%hist -f %s" % os.path.join(tmpdir, "test1")) |
|
57 | 56 | ip.magic("%%hist -pf %s" % os.path.join(tmpdir, "test2")) |
|
58 | 57 | ip.magic("%%hist -nf %s" % os.path.join(tmpdir, "test3")) |
|
59 | 58 | ip.magic("%%save %s 1-10" % os.path.join(tmpdir, "test4")) |
|
60 | 59 | |
|
61 | 60 | # New session |
|
62 | 61 | ip.history_manager.reset() |
|
63 | 62 | newcmds = [u"z=5", |
|
64 | 63 | u"class X(object):\n pass", |
|
65 | 64 | u"k='p'", |
|
66 | 65 | u"z=5"] |
|
67 | 66 | for i, cmd in enumerate(newcmds, start=1): |
|
68 | 67 | ip.history_manager.store_inputs(i, cmd) |
|
69 | 68 | gothist = ip.history_manager.get_range(start=1, stop=4) |
|
70 | 69 | nt.assert_equal(list(gothist), list(zip([0,0,0],[1,2,3], newcmds))) |
|
71 | 70 | # Previous session: |
|
72 | 71 | gothist = ip.history_manager.get_range(-1, 1, 4) |
|
73 | 72 | nt.assert_equal(list(gothist), list(zip([1,1,1],[1,2,3], hist))) |
|
74 | 73 | |
|
75 | 74 | newhist = [(2, i, c) for (i, c) in enumerate(newcmds, 1)] |
|
76 | 75 | |
|
77 | 76 | # Check get_hist_tail |
|
78 | 77 | gothist = ip.history_manager.get_tail(5, output=True, |
|
79 | 78 | include_latest=True) |
|
80 | 79 | expected = [(1, 3, (hist[-1], "spam"))] \ |
|
81 | 80 | + [(s, n, (c, None)) for (s, n, c) in newhist] |
|
82 | 81 | nt.assert_equal(list(gothist), expected) |
|
83 | 82 | |
|
84 | 83 | gothist = ip.history_manager.get_tail(2) |
|
85 | 84 | expected = newhist[-3:-1] |
|
86 | 85 | nt.assert_equal(list(gothist), expected) |
|
87 | 86 | |
|
88 | 87 | # Check get_hist_search |
|
89 | 88 | gothist = ip.history_manager.search("*test*") |
|
90 | 89 | nt.assert_equal(list(gothist), [(1,2,hist[1])] ) |
|
91 | 90 | |
|
92 | 91 | gothist = ip.history_manager.search("*=*") |
|
93 | 92 | nt.assert_equal(list(gothist), |
|
94 | 93 | [(1, 1, hist[0]), |
|
95 | 94 | (1, 2, hist[1]), |
|
96 | 95 | (1, 3, hist[2]), |
|
97 | 96 | newhist[0], |
|
98 | 97 | newhist[2], |
|
99 | 98 | newhist[3]]) |
|
100 | 99 | |
|
101 | 100 | gothist = ip.history_manager.search("*=*", n=4) |
|
102 | 101 | nt.assert_equal(list(gothist), |
|
103 | 102 | [(1, 3, hist[2]), |
|
104 | 103 | newhist[0], |
|
105 | 104 | newhist[2], |
|
106 | 105 | newhist[3]]) |
|
107 | 106 | |
|
108 | 107 | gothist = ip.history_manager.search("*=*", unique=True) |
|
109 | 108 | nt.assert_equal(list(gothist), |
|
110 | 109 | [(1, 1, hist[0]), |
|
111 | 110 | (1, 2, hist[1]), |
|
112 | 111 | (1, 3, hist[2]), |
|
113 | 112 | newhist[2], |
|
114 | 113 | newhist[3]]) |
|
115 | 114 | |
|
116 | 115 | gothist = ip.history_manager.search("*=*", unique=True, n=3) |
|
117 | 116 | nt.assert_equal(list(gothist), |
|
118 | 117 | [(1, 3, hist[2]), |
|
119 | 118 | newhist[2], |
|
120 | 119 | newhist[3]]) |
|
121 | 120 | |
|
122 | 121 | gothist = ip.history_manager.search("b*", output=True) |
|
123 | 122 | nt.assert_equal(list(gothist), [(1,3,(hist[2],"spam"))] ) |
|
124 | 123 | |
|
125 | 124 | # Cross testing: check that magic %save can get previous session. |
|
126 | 125 | testfilename = os.path.realpath(os.path.join(tmpdir, "test.py")) |
|
127 | 126 | ip.magic("save " + testfilename + " ~1/1-3") |
|
128 | 127 | with io.open(testfilename, encoding='utf-8') as testfile: |
|
129 | 128 | nt.assert_equal(testfile.read(), |
|
130 | 129 | u"# coding: utf-8\n" + u"\n".join(hist)+u"\n") |
|
131 | 130 | |
|
132 | 131 | # Duplicate line numbers - check that it doesn't crash, and |
|
133 | 132 | # gets a new session |
|
134 | 133 | ip.history_manager.store_inputs(1, "rogue") |
|
135 | 134 | ip.history_manager.writeout_cache() |
|
136 | 135 | nt.assert_equal(ip.history_manager.session_number, 3) |
|
137 | 136 | finally: |
|
138 | 137 | # Ensure saving thread is shut down before we try to clean up the files |
|
139 | 138 | ip.history_manager.save_thread.stop() |
|
140 | 139 | # Forcibly close database rather than relying on garbage collection |
|
141 | 140 | ip.history_manager.db.close() |
|
142 | 141 | # Restore history manager |
|
143 | 142 | ip.history_manager = hist_manager_ori |
|
144 | 143 | |
|
145 | 144 | |
|
146 | 145 | def test_extract_hist_ranges(): |
|
147 | 146 | instr = "1 2/3 ~4/5-6 ~4/7-~4/9 ~9/2-~7/5 ~10/" |
|
148 | 147 | expected = [(0, 1, 2), # 0 == current session |
|
149 | 148 | (2, 3, 4), |
|
150 | 149 | (-4, 5, 7), |
|
151 | 150 | (-4, 7, 10), |
|
152 | 151 | (-9, 2, None), # None == to end |
|
153 | 152 | (-8, 1, None), |
|
154 | 153 | (-7, 1, 6), |
|
155 | 154 | (-10, 1, None)] |
|
156 | 155 | actual = list(extract_hist_ranges(instr)) |
|
157 | 156 | nt.assert_equal(actual, expected) |
|
158 | 157 | |
|
159 | 158 | def test_magic_rerun(): |
|
160 | 159 | """Simple test for %rerun (no args -> rerun last line)""" |
|
161 | 160 | ip = get_ipython() |
|
162 | 161 | ip.run_cell("a = 10", store_history=True) |
|
163 | 162 | ip.run_cell("a += 1", store_history=True) |
|
164 | 163 | nt.assert_equal(ip.user_ns["a"], 11) |
|
165 | 164 | ip.run_cell("%rerun", store_history=True) |
|
166 | 165 | nt.assert_equal(ip.user_ns["a"], 12) |
|
167 | 166 | |
|
168 | 167 | def test_timestamp_type(): |
|
169 | 168 | ip = get_ipython() |
|
170 | 169 | info = ip.history_manager.get_session_info() |
|
171 | 170 | nt.assert_true(isinstance(info[1], datetime)) |
|
172 | 171 | |
|
173 | 172 | def test_hist_file_config(): |
|
174 | 173 | cfg = Config() |
|
175 | 174 | tfile = tempfile.NamedTemporaryFile(delete=False) |
|
176 | 175 | cfg.HistoryManager.hist_file = tfile.name |
|
177 | 176 | try: |
|
178 | 177 | hm = HistoryManager(shell=get_ipython(), config=cfg) |
|
179 | 178 | nt.assert_equal(hm.hist_file, cfg.HistoryManager.hist_file) |
|
180 | 179 | finally: |
|
181 | 180 | try: |
|
182 | 181 | os.remove(tfile.name) |
|
183 | 182 | except OSError: |
|
184 | 183 | # same catch as in testing.tools.TempFileMixin |
|
185 | 184 | # On Windows, even though we close the file, we still can't |
|
186 | 185 | # delete it. I have no clue why |
|
187 | 186 | pass |
|
188 | 187 | |
|
189 | 188 | def test_histmanager_disabled(): |
|
190 | 189 | """Ensure that disabling the history manager doesn't create a database.""" |
|
191 | 190 | cfg = Config() |
|
192 | 191 | cfg.HistoryAccessor.enabled = False |
|
193 | 192 | |
|
194 | 193 | ip = get_ipython() |
|
195 | 194 | with TemporaryDirectory() as tmpdir: |
|
196 | 195 | hist_manager_ori = ip.history_manager |
|
197 | 196 | hist_file = os.path.join(tmpdir, 'history.sqlite') |
|
198 | 197 | cfg.HistoryManager.hist_file = hist_file |
|
199 | 198 | try: |
|
200 | 199 | ip.history_manager = HistoryManager(shell=ip, config=cfg) |
|
201 | 200 | hist = [u'a=1', u'def f():\n test = 1\n return test', u"b='€Æ¾÷ß'"] |
|
202 | 201 | for i, h in enumerate(hist, start=1): |
|
203 | 202 | ip.history_manager.store_inputs(i, h) |
|
204 | 203 | nt.assert_equal(ip.history_manager.input_hist_raw, [''] + hist) |
|
205 | 204 | ip.history_manager.reset() |
|
206 | 205 | ip.history_manager.end_session() |
|
207 | 206 | finally: |
|
208 | 207 | ip.history_manager = hist_manager_ori |
|
209 | 208 | |
|
210 | 209 | # hist_file should not be created |
|
211 | 210 | nt.assert_false(os.path.exists(hist_file)) |
@@ -1,926 +1,924 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """Tests for the key interactiveshell module. |
|
3 | 3 | |
|
4 | 4 | Historically the main classes in interactiveshell have been under-tested. This |
|
5 | 5 | module should grow as many single-method tests as possible to trap many of the |
|
6 | 6 | recurring bugs we seem to encounter with high-level interaction. |
|
7 | 7 | """ |
|
8 | 8 | |
|
9 | 9 | # Copyright (c) IPython Development Team. |
|
10 | 10 | # Distributed under the terms of the Modified BSD License. |
|
11 | 11 | |
|
12 | 12 | import ast |
|
13 | 13 | import os |
|
14 | 14 | import signal |
|
15 | 15 | import shutil |
|
16 | 16 | import sys |
|
17 | 17 | import tempfile |
|
18 | 18 | import unittest |
|
19 | 19 | from unittest import mock |
|
20 | from io import StringIO | |
|
21 | 20 | |
|
22 | 21 | from os.path import join |
|
23 | 22 | |
|
24 | 23 | import nose.tools as nt |
|
25 | 24 | |
|
26 | 25 | from IPython.core.error import InputRejected |
|
27 | 26 | from IPython.core.inputtransformer import InputTransformer |
|
28 | 27 | from IPython.testing.decorators import ( |
|
29 | 28 | skipif, skip_win32, onlyif_unicode_paths, onlyif_cmds_exist, |
|
30 | 29 | ) |
|
31 | 30 | from IPython.testing import tools as tt |
|
32 | 31 | from IPython.utils.process import find_cmd |
|
33 | from IPython.utils import py3compat | |
|
34 | 32 | |
|
35 | 33 | #----------------------------------------------------------------------------- |
|
36 | 34 | # Globals |
|
37 | 35 | #----------------------------------------------------------------------------- |
|
38 | 36 | # This is used by every single test, no point repeating it ad nauseam |
|
39 | 37 | ip = get_ipython() |
|
40 | 38 | |
|
41 | 39 | #----------------------------------------------------------------------------- |
|
42 | 40 | # Tests |
|
43 | 41 | #----------------------------------------------------------------------------- |
|
44 | 42 | |
|
45 | 43 | class DerivedInterrupt(KeyboardInterrupt): |
|
46 | 44 | pass |
|
47 | 45 | |
|
48 | 46 | class InteractiveShellTestCase(unittest.TestCase): |
|
49 | 47 | def test_naked_string_cells(self): |
|
50 | 48 | """Test that cells with only naked strings are fully executed""" |
|
51 | 49 | # First, single-line inputs |
|
52 | 50 | ip.run_cell('"a"\n') |
|
53 | 51 | self.assertEqual(ip.user_ns['_'], 'a') |
|
54 | 52 | # And also multi-line cells |
|
55 | 53 | ip.run_cell('"""a\nb"""\n') |
|
56 | 54 | self.assertEqual(ip.user_ns['_'], 'a\nb') |
|
57 | 55 | |
|
58 | 56 | def test_run_empty_cell(self): |
|
59 | 57 | """Just make sure we don't get a horrible error with a blank |
|
60 | 58 | cell of input. Yes, I did overlook that.""" |
|
61 | 59 | old_xc = ip.execution_count |
|
62 | 60 | res = ip.run_cell('') |
|
63 | 61 | self.assertEqual(ip.execution_count, old_xc) |
|
64 | 62 | self.assertEqual(res.execution_count, None) |
|
65 | 63 | |
|
66 | 64 | def test_run_cell_multiline(self): |
|
67 | 65 | """Multi-block, multi-line cells must execute correctly. |
|
68 | 66 | """ |
|
69 | 67 | src = '\n'.join(["x=1", |
|
70 | 68 | "y=2", |
|
71 | 69 | "if 1:", |
|
72 | 70 | " x += 1", |
|
73 | 71 | " y += 1",]) |
|
74 | 72 | res = ip.run_cell(src) |
|
75 | 73 | self.assertEqual(ip.user_ns['x'], 2) |
|
76 | 74 | self.assertEqual(ip.user_ns['y'], 3) |
|
77 | 75 | self.assertEqual(res.success, True) |
|
78 | 76 | self.assertEqual(res.result, None) |
|
79 | 77 | |
|
80 | 78 | def test_multiline_string_cells(self): |
|
81 | 79 | "Code sprinkled with multiline strings should execute (GH-306)" |
|
82 | 80 | ip.run_cell('tmp=0') |
|
83 | 81 | self.assertEqual(ip.user_ns['tmp'], 0) |
|
84 | 82 | res = ip.run_cell('tmp=1;"""a\nb"""\n') |
|
85 | 83 | self.assertEqual(ip.user_ns['tmp'], 1) |
|
86 | 84 | self.assertEqual(res.success, True) |
|
87 | 85 | self.assertEqual(res.result, "a\nb") |
|
88 | 86 | |
|
89 | 87 | def test_dont_cache_with_semicolon(self): |
|
90 | 88 | "Ending a line with semicolon should not cache the returned object (GH-307)" |
|
91 | 89 | oldlen = len(ip.user_ns['Out']) |
|
92 | 90 | for cell in ['1;', '1;1;']: |
|
93 | 91 | res = ip.run_cell(cell, store_history=True) |
|
94 | 92 | newlen = len(ip.user_ns['Out']) |
|
95 | 93 | self.assertEqual(oldlen, newlen) |
|
96 | 94 | self.assertIsNone(res.result) |
|
97 | 95 | i = 0 |
|
98 | 96 | #also test the default caching behavior |
|
99 | 97 | for cell in ['1', '1;1']: |
|
100 | 98 | ip.run_cell(cell, store_history=True) |
|
101 | 99 | newlen = len(ip.user_ns['Out']) |
|
102 | 100 | i += 1 |
|
103 | 101 | self.assertEqual(oldlen+i, newlen) |
|
104 | 102 | |
|
105 | 103 | def test_syntax_error(self): |
|
106 | 104 | res = ip.run_cell("raise = 3") |
|
107 | 105 | self.assertIsInstance(res.error_before_exec, SyntaxError) |
|
108 | 106 | |
|
109 | 107 | def test_In_variable(self): |
|
110 | 108 | "Verify that In variable grows with user input (GH-284)" |
|
111 | 109 | oldlen = len(ip.user_ns['In']) |
|
112 | 110 | ip.run_cell('1;', store_history=True) |
|
113 | 111 | newlen = len(ip.user_ns['In']) |
|
114 | 112 | self.assertEqual(oldlen+1, newlen) |
|
115 | 113 | self.assertEqual(ip.user_ns['In'][-1],'1;') |
|
116 | 114 | |
|
117 | 115 | def test_magic_names_in_string(self): |
|
118 | 116 | ip.run_cell('a = """\n%exit\n"""') |
|
119 | 117 | self.assertEqual(ip.user_ns['a'], '\n%exit\n') |
|
120 | 118 | |
|
121 | 119 | def test_trailing_newline(self): |
|
122 | 120 | """test that running !(command) does not raise a SyntaxError""" |
|
123 | 121 | ip.run_cell('!(true)\n', False) |
|
124 | 122 | ip.run_cell('!(true)\n\n\n', False) |
|
125 | 123 | |
|
126 | 124 | def test_gh_597(self): |
|
127 | 125 | """Pretty-printing lists of objects with non-ascii reprs may cause |
|
128 | 126 | problems.""" |
|
129 | 127 | class Spam(object): |
|
130 | 128 | def __repr__(self): |
|
131 | 129 | return "\xe9"*50 |
|
132 | 130 | import IPython.core.formatters |
|
133 | 131 | f = IPython.core.formatters.PlainTextFormatter() |
|
134 | 132 | f([Spam(),Spam()]) |
|
135 | 133 | |
|
136 | 134 | |
|
137 | 135 | def test_future_flags(self): |
|
138 | 136 | """Check that future flags are used for parsing code (gh-777)""" |
|
139 | 137 | ip.run_cell('from __future__ import barry_as_FLUFL') |
|
140 | 138 | try: |
|
141 | 139 | ip.run_cell('prfunc_return_val = 1 <> 2') |
|
142 | 140 | assert 'prfunc_return_val' in ip.user_ns |
|
143 | 141 | finally: |
|
144 | 142 | # Reset compiler flags so we don't mess up other tests. |
|
145 | 143 | ip.compile.reset_compiler_flags() |
|
146 | 144 | |
|
147 | 145 | def test_can_pickle(self): |
|
148 | 146 | "Can we pickle objects defined interactively (GH-29)" |
|
149 | 147 | ip = get_ipython() |
|
150 | 148 | ip.reset() |
|
151 | 149 | ip.run_cell(("class Mylist(list):\n" |
|
152 | 150 | " def __init__(self,x=[]):\n" |
|
153 | 151 | " list.__init__(self,x)")) |
|
154 | 152 | ip.run_cell("w=Mylist([1,2,3])") |
|
155 | 153 | |
|
156 | 154 | from pickle import dumps |
|
157 | 155 | |
|
158 | 156 | # We need to swap in our main module - this is only necessary |
|
159 | 157 | # inside the test framework, because IPython puts the interactive module |
|
160 | 158 | # in place (but the test framework undoes this). |
|
161 | 159 | _main = sys.modules['__main__'] |
|
162 | 160 | sys.modules['__main__'] = ip.user_module |
|
163 | 161 | try: |
|
164 | 162 | res = dumps(ip.user_ns["w"]) |
|
165 | 163 | finally: |
|
166 | 164 | sys.modules['__main__'] = _main |
|
167 | 165 | self.assertTrue(isinstance(res, bytes)) |
|
168 | 166 | |
|
169 | 167 | def test_global_ns(self): |
|
170 | 168 | "Code in functions must be able to access variables outside them." |
|
171 | 169 | ip = get_ipython() |
|
172 | 170 | ip.run_cell("a = 10") |
|
173 | 171 | ip.run_cell(("def f(x):\n" |
|
174 | 172 | " return x + a")) |
|
175 | 173 | ip.run_cell("b = f(12)") |
|
176 | 174 | self.assertEqual(ip.user_ns["b"], 22) |
|
177 | 175 | |
|
178 | 176 | def test_bad_custom_tb(self): |
|
179 | 177 | """Check that InteractiveShell is protected from bad custom exception handlers""" |
|
180 | 178 | ip.set_custom_exc((IOError,), lambda etype,value,tb: 1/0) |
|
181 | 179 | self.assertEqual(ip.custom_exceptions, (IOError,)) |
|
182 | 180 | with tt.AssertPrints("Custom TB Handler failed", channel='stderr'): |
|
183 | 181 | ip.run_cell(u'raise IOError("foo")') |
|
184 | 182 | self.assertEqual(ip.custom_exceptions, ()) |
|
185 | 183 | |
|
186 | 184 | def test_bad_custom_tb_return(self): |
|
187 | 185 | """Check that InteractiveShell is protected from bad return types in custom exception handlers""" |
|
188 | 186 | ip.set_custom_exc((NameError,),lambda etype,value,tb, tb_offset=None: 1) |
|
189 | 187 | self.assertEqual(ip.custom_exceptions, (NameError,)) |
|
190 | 188 | with tt.AssertPrints("Custom TB Handler failed", channel='stderr'): |
|
191 | 189 | ip.run_cell(u'a=abracadabra') |
|
192 | 190 | self.assertEqual(ip.custom_exceptions, ()) |
|
193 | 191 | |
|
194 | 192 | def test_drop_by_id(self): |
|
195 | 193 | myvars = {"a":object(), "b":object(), "c": object()} |
|
196 | 194 | ip.push(myvars, interactive=False) |
|
197 | 195 | for name in myvars: |
|
198 | 196 | assert name in ip.user_ns, name |
|
199 | 197 | assert name in ip.user_ns_hidden, name |
|
200 | 198 | ip.user_ns['b'] = 12 |
|
201 | 199 | ip.drop_by_id(myvars) |
|
202 | 200 | for name in ["a", "c"]: |
|
203 | 201 | assert name not in ip.user_ns, name |
|
204 | 202 | assert name not in ip.user_ns_hidden, name |
|
205 | 203 | assert ip.user_ns['b'] == 12 |
|
206 | 204 | ip.reset() |
|
207 | 205 | |
|
208 | 206 | def test_var_expand(self): |
|
209 | 207 | ip.user_ns['f'] = u'Ca\xf1o' |
|
210 | 208 | self.assertEqual(ip.var_expand(u'echo $f'), u'echo Ca\xf1o') |
|
211 | 209 | self.assertEqual(ip.var_expand(u'echo {f}'), u'echo Ca\xf1o') |
|
212 | 210 | self.assertEqual(ip.var_expand(u'echo {f[:-1]}'), u'echo Ca\xf1') |
|
213 | 211 | self.assertEqual(ip.var_expand(u'echo {1*2}'), u'echo 2') |
|
214 | 212 | |
|
215 | 213 | self.assertEqual(ip.var_expand(u"grep x | awk '{print $1}'"), u"grep x | awk '{print $1}'") |
|
216 | 214 | |
|
217 | 215 | ip.user_ns['f'] = b'Ca\xc3\xb1o' |
|
218 | 216 | # This should not raise any exception: |
|
219 | 217 | ip.var_expand(u'echo $f') |
|
220 | 218 | |
|
221 | 219 | def test_var_expand_local(self): |
|
222 | 220 | """Test local variable expansion in !system and %magic calls""" |
|
223 | 221 | # !system |
|
224 | 222 | ip.run_cell('def test():\n' |
|
225 | 223 | ' lvar = "ttt"\n' |
|
226 | 224 | ' ret = !echo {lvar}\n' |
|
227 | 225 | ' return ret[0]\n') |
|
228 | 226 | res = ip.user_ns['test']() |
|
229 | 227 | nt.assert_in('ttt', res) |
|
230 | 228 | |
|
231 | 229 | # %magic |
|
232 | 230 | ip.run_cell('def makemacro():\n' |
|
233 | 231 | ' macroname = "macro_var_expand_locals"\n' |
|
234 | 232 | ' %macro {macroname} codestr\n') |
|
235 | 233 | ip.user_ns['codestr'] = "str(12)" |
|
236 | 234 | ip.run_cell('makemacro()') |
|
237 | 235 | nt.assert_in('macro_var_expand_locals', ip.user_ns) |
|
238 | 236 | |
|
239 | 237 | def test_var_expand_self(self): |
|
240 | 238 | """Test variable expansion with the name 'self', which was failing. |
|
241 | 239 | |
|
242 | 240 | See https://github.com/ipython/ipython/issues/1878#issuecomment-7698218 |
|
243 | 241 | """ |
|
244 | 242 | ip.run_cell('class cTest:\n' |
|
245 | 243 | ' classvar="see me"\n' |
|
246 | 244 | ' def test(self):\n' |
|
247 | 245 | ' res = !echo Variable: {self.classvar}\n' |
|
248 | 246 | ' return res[0]\n') |
|
249 | 247 | nt.assert_in('see me', ip.user_ns['cTest']().test()) |
|
250 | 248 | |
|
251 | 249 | def test_bad_var_expand(self): |
|
252 | 250 | """var_expand on invalid formats shouldn't raise""" |
|
253 | 251 | # SyntaxError |
|
254 | 252 | self.assertEqual(ip.var_expand(u"{'a':5}"), u"{'a':5}") |
|
255 | 253 | # NameError |
|
256 | 254 | self.assertEqual(ip.var_expand(u"{asdf}"), u"{asdf}") |
|
257 | 255 | # ZeroDivisionError |
|
258 | 256 | self.assertEqual(ip.var_expand(u"{1/0}"), u"{1/0}") |
|
259 | 257 | |
|
260 | 258 | def test_silent_postexec(self): |
|
261 | 259 | """run_cell(silent=True) doesn't invoke pre/post_run_cell callbacks""" |
|
262 | 260 | pre_explicit = mock.Mock() |
|
263 | 261 | pre_always = mock.Mock() |
|
264 | 262 | post_explicit = mock.Mock() |
|
265 | 263 | post_always = mock.Mock() |
|
266 | 264 | all_mocks = [pre_explicit, pre_always, post_explicit, post_always] |
|
267 | 265 | |
|
268 | 266 | ip.events.register('pre_run_cell', pre_explicit) |
|
269 | 267 | ip.events.register('pre_execute', pre_always) |
|
270 | 268 | ip.events.register('post_run_cell', post_explicit) |
|
271 | 269 | ip.events.register('post_execute', post_always) |
|
272 | 270 | |
|
273 | 271 | try: |
|
274 | 272 | ip.run_cell("1", silent=True) |
|
275 | 273 | assert pre_always.called |
|
276 | 274 | assert not pre_explicit.called |
|
277 | 275 | assert post_always.called |
|
278 | 276 | assert not post_explicit.called |
|
279 | 277 | # double-check that non-silent exec did what we expected |
|
280 | 278 | # silent to avoid |
|
281 | 279 | ip.run_cell("1") |
|
282 | 280 | assert pre_explicit.called |
|
283 | 281 | assert post_explicit.called |
|
284 | 282 | info, = pre_explicit.call_args[0] |
|
285 | 283 | result, = post_explicit.call_args[0] |
|
286 | 284 | self.assertEqual(info, result.info) |
|
287 | 285 | # check that post hooks are always called |
|
288 | 286 | [m.reset_mock() for m in all_mocks] |
|
289 | 287 | ip.run_cell("syntax error") |
|
290 | 288 | assert pre_always.called |
|
291 | 289 | assert pre_explicit.called |
|
292 | 290 | assert post_always.called |
|
293 | 291 | assert post_explicit.called |
|
294 | 292 | info, = pre_explicit.call_args[0] |
|
295 | 293 | result, = post_explicit.call_args[0] |
|
296 | 294 | self.assertEqual(info, result.info) |
|
297 | 295 | finally: |
|
298 | 296 | # remove post-exec |
|
299 | 297 | ip.events.unregister('pre_run_cell', pre_explicit) |
|
300 | 298 | ip.events.unregister('pre_execute', pre_always) |
|
301 | 299 | ip.events.unregister('post_run_cell', post_explicit) |
|
302 | 300 | ip.events.unregister('post_execute', post_always) |
|
303 | 301 | |
|
304 | 302 | def test_silent_noadvance(self): |
|
305 | 303 | """run_cell(silent=True) doesn't advance execution_count""" |
|
306 | 304 | ec = ip.execution_count |
|
307 | 305 | # silent should force store_history=False |
|
308 | 306 | ip.run_cell("1", store_history=True, silent=True) |
|
309 | 307 | |
|
310 | 308 | self.assertEqual(ec, ip.execution_count) |
|
311 | 309 | # double-check that non-silent exec did what we expected |
|
312 | 310 | # silent to avoid |
|
313 | 311 | ip.run_cell("1", store_history=True) |
|
314 | 312 | self.assertEqual(ec+1, ip.execution_count) |
|
315 | 313 | |
|
316 | 314 | def test_silent_nodisplayhook(self): |
|
317 | 315 | """run_cell(silent=True) doesn't trigger displayhook""" |
|
318 | 316 | d = dict(called=False) |
|
319 | 317 | |
|
320 | 318 | trap = ip.display_trap |
|
321 | 319 | save_hook = trap.hook |
|
322 | 320 | |
|
323 | 321 | def failing_hook(*args, **kwargs): |
|
324 | 322 | d['called'] = True |
|
325 | 323 | |
|
326 | 324 | try: |
|
327 | 325 | trap.hook = failing_hook |
|
328 | 326 | res = ip.run_cell("1", silent=True) |
|
329 | 327 | self.assertFalse(d['called']) |
|
330 | 328 | self.assertIsNone(res.result) |
|
331 | 329 | # double-check that non-silent exec did what we expected |
|
332 | 330 | # silent to avoid |
|
333 | 331 | ip.run_cell("1") |
|
334 | 332 | self.assertTrue(d['called']) |
|
335 | 333 | finally: |
|
336 | 334 | trap.hook = save_hook |
|
337 | 335 | |
|
338 | 336 | def test_ofind_line_magic(self): |
|
339 | 337 | from IPython.core.magic import register_line_magic |
|
340 | 338 | |
|
341 | 339 | @register_line_magic |
|
342 | 340 | def lmagic(line): |
|
343 | 341 | "A line magic" |
|
344 | 342 | |
|
345 | 343 | # Get info on line magic |
|
346 | 344 | lfind = ip._ofind('lmagic') |
|
347 | 345 | info = dict(found=True, isalias=False, ismagic=True, |
|
348 | 346 | namespace = 'IPython internal', obj= lmagic.__wrapped__, |
|
349 | 347 | parent = None) |
|
350 | 348 | nt.assert_equal(lfind, info) |
|
351 | 349 | |
|
352 | 350 | def test_ofind_cell_magic(self): |
|
353 | 351 | from IPython.core.magic import register_cell_magic |
|
354 | 352 | |
|
355 | 353 | @register_cell_magic |
|
356 | 354 | def cmagic(line, cell): |
|
357 | 355 | "A cell magic" |
|
358 | 356 | |
|
359 | 357 | # Get info on cell magic |
|
360 | 358 | find = ip._ofind('cmagic') |
|
361 | 359 | info = dict(found=True, isalias=False, ismagic=True, |
|
362 | 360 | namespace = 'IPython internal', obj= cmagic.__wrapped__, |
|
363 | 361 | parent = None) |
|
364 | 362 | nt.assert_equal(find, info) |
|
365 | 363 | |
|
366 | 364 | def test_ofind_property_with_error(self): |
|
367 | 365 | class A(object): |
|
368 | 366 | @property |
|
369 | 367 | def foo(self): |
|
370 | 368 | raise NotImplementedError() |
|
371 | 369 | a = A() |
|
372 | 370 | |
|
373 | 371 | found = ip._ofind('a.foo', [('locals', locals())]) |
|
374 | 372 | info = dict(found=True, isalias=False, ismagic=False, |
|
375 | 373 | namespace='locals', obj=A.foo, parent=a) |
|
376 | 374 | nt.assert_equal(found, info) |
|
377 | 375 | |
|
378 | 376 | def test_ofind_multiple_attribute_lookups(self): |
|
379 | 377 | class A(object): |
|
380 | 378 | @property |
|
381 | 379 | def foo(self): |
|
382 | 380 | raise NotImplementedError() |
|
383 | 381 | |
|
384 | 382 | a = A() |
|
385 | 383 | a.a = A() |
|
386 | 384 | a.a.a = A() |
|
387 | 385 | |
|
388 | 386 | found = ip._ofind('a.a.a.foo', [('locals', locals())]) |
|
389 | 387 | info = dict(found=True, isalias=False, ismagic=False, |
|
390 | 388 | namespace='locals', obj=A.foo, parent=a.a.a) |
|
391 | 389 | nt.assert_equal(found, info) |
|
392 | 390 | |
|
393 | 391 | def test_ofind_slotted_attributes(self): |
|
394 | 392 | class A(object): |
|
395 | 393 | __slots__ = ['foo'] |
|
396 | 394 | def __init__(self): |
|
397 | 395 | self.foo = 'bar' |
|
398 | 396 | |
|
399 | 397 | a = A() |
|
400 | 398 | found = ip._ofind('a.foo', [('locals', locals())]) |
|
401 | 399 | info = dict(found=True, isalias=False, ismagic=False, |
|
402 | 400 | namespace='locals', obj=a.foo, parent=a) |
|
403 | 401 | nt.assert_equal(found, info) |
|
404 | 402 | |
|
405 | 403 | found = ip._ofind('a.bar', [('locals', locals())]) |
|
406 | 404 | info = dict(found=False, isalias=False, ismagic=False, |
|
407 | 405 | namespace=None, obj=None, parent=a) |
|
408 | 406 | nt.assert_equal(found, info) |
|
409 | 407 | |
|
410 | 408 | def test_ofind_prefers_property_to_instance_level_attribute(self): |
|
411 | 409 | class A(object): |
|
412 | 410 | @property |
|
413 | 411 | def foo(self): |
|
414 | 412 | return 'bar' |
|
415 | 413 | a = A() |
|
416 | 414 | a.__dict__['foo'] = 'baz' |
|
417 | 415 | nt.assert_equal(a.foo, 'bar') |
|
418 | 416 | found = ip._ofind('a.foo', [('locals', locals())]) |
|
419 | 417 | nt.assert_is(found['obj'], A.foo) |
|
420 | 418 | |
|
421 | 419 | def test_custom_syntaxerror_exception(self): |
|
422 | 420 | called = [] |
|
423 | 421 | def my_handler(shell, etype, value, tb, tb_offset=None): |
|
424 | 422 | called.append(etype) |
|
425 | 423 | shell.showtraceback((etype, value, tb), tb_offset=tb_offset) |
|
426 | 424 | |
|
427 | 425 | ip.set_custom_exc((SyntaxError,), my_handler) |
|
428 | 426 | try: |
|
429 | 427 | ip.run_cell("1f") |
|
430 | 428 | # Check that this was called, and only once. |
|
431 | 429 | self.assertEqual(called, [SyntaxError]) |
|
432 | 430 | finally: |
|
433 | 431 | # Reset the custom exception hook |
|
434 | 432 | ip.set_custom_exc((), None) |
|
435 | 433 | |
|
436 | 434 | def test_custom_exception(self): |
|
437 | 435 | called = [] |
|
438 | 436 | def my_handler(shell, etype, value, tb, tb_offset=None): |
|
439 | 437 | called.append(etype) |
|
440 | 438 | shell.showtraceback((etype, value, tb), tb_offset=tb_offset) |
|
441 | 439 | |
|
442 | 440 | ip.set_custom_exc((ValueError,), my_handler) |
|
443 | 441 | try: |
|
444 | 442 | res = ip.run_cell("raise ValueError('test')") |
|
445 | 443 | # Check that this was called, and only once. |
|
446 | 444 | self.assertEqual(called, [ValueError]) |
|
447 | 445 | # Check that the error is on the result object |
|
448 | 446 | self.assertIsInstance(res.error_in_exec, ValueError) |
|
449 | 447 | finally: |
|
450 | 448 | # Reset the custom exception hook |
|
451 | 449 | ip.set_custom_exc((), None) |
|
452 | 450 | |
|
453 | 451 | def test_mktempfile(self): |
|
454 | 452 | filename = ip.mktempfile() |
|
455 | 453 | # Check that we can open the file again on Windows |
|
456 | 454 | with open(filename, 'w') as f: |
|
457 | 455 | f.write('abc') |
|
458 | 456 | |
|
459 | 457 | filename = ip.mktempfile(data='blah') |
|
460 | 458 | with open(filename, 'r') as f: |
|
461 | 459 | self.assertEqual(f.read(), 'blah') |
|
462 | 460 | |
|
463 | 461 | def test_new_main_mod(self): |
|
464 | 462 | # Smoketest to check that this accepts a unicode module name |
|
465 | 463 | name = u'jiefmw' |
|
466 | 464 | mod = ip.new_main_mod(u'%s.py' % name, name) |
|
467 | 465 | self.assertEqual(mod.__name__, name) |
|
468 | 466 | |
|
469 | 467 | def test_get_exception_only(self): |
|
470 | 468 | try: |
|
471 | 469 | raise KeyboardInterrupt |
|
472 | 470 | except KeyboardInterrupt: |
|
473 | 471 | msg = ip.get_exception_only() |
|
474 | 472 | self.assertEqual(msg, 'KeyboardInterrupt\n') |
|
475 | 473 | |
|
476 | 474 | try: |
|
477 | 475 | raise DerivedInterrupt("foo") |
|
478 | 476 | except KeyboardInterrupt: |
|
479 | 477 | msg = ip.get_exception_only() |
|
480 | 478 | self.assertEqual(msg, 'IPython.core.tests.test_interactiveshell.DerivedInterrupt: foo\n') |
|
481 | 479 | |
|
482 | 480 | def test_inspect_text(self): |
|
483 | 481 | ip.run_cell('a = 5') |
|
484 | 482 | text = ip.object_inspect_text('a') |
|
485 | 483 | self.assertIsInstance(text, str) |
|
486 | 484 | |
|
487 | 485 | def test_last_execution_result(self): |
|
488 | 486 | """ Check that last execution result gets set correctly (GH-10702) """ |
|
489 | 487 | result = ip.run_cell('a = 5; a') |
|
490 | 488 | self.assertTrue(ip.last_execution_succeeded) |
|
491 | 489 | self.assertEqual(ip.last_execution_result.result, 5) |
|
492 | 490 | |
|
493 | 491 | result = ip.run_cell('a = x_invalid_id_x') |
|
494 | 492 | self.assertFalse(ip.last_execution_succeeded) |
|
495 | 493 | self.assertFalse(ip.last_execution_result.success) |
|
496 | 494 | self.assertIsInstance(ip.last_execution_result.error_in_exec, NameError) |
|
497 | 495 | |
|
498 | 496 | |
|
499 | 497 | class TestSafeExecfileNonAsciiPath(unittest.TestCase): |
|
500 | 498 | |
|
501 | 499 | @onlyif_unicode_paths |
|
502 | 500 | def setUp(self): |
|
503 | 501 | self.BASETESTDIR = tempfile.mkdtemp() |
|
504 | 502 | self.TESTDIR = join(self.BASETESTDIR, u"åäö") |
|
505 | 503 | os.mkdir(self.TESTDIR) |
|
506 | 504 | with open(join(self.TESTDIR, u"åäötestscript.py"), "w") as sfile: |
|
507 | 505 | sfile.write("pass\n") |
|
508 | 506 | self.oldpath = os.getcwd() |
|
509 | 507 | os.chdir(self.TESTDIR) |
|
510 | 508 | self.fname = u"åäötestscript.py" |
|
511 | 509 | |
|
512 | 510 | def tearDown(self): |
|
513 | 511 | os.chdir(self.oldpath) |
|
514 | 512 | shutil.rmtree(self.BASETESTDIR) |
|
515 | 513 | |
|
516 | 514 | @onlyif_unicode_paths |
|
517 | 515 | def test_1(self): |
|
518 | 516 | """Test safe_execfile with non-ascii path |
|
519 | 517 | """ |
|
520 | 518 | ip.safe_execfile(self.fname, {}, raise_exceptions=True) |
|
521 | 519 | |
|
522 | 520 | class ExitCodeChecks(tt.TempFileMixin): |
|
523 | 521 | def test_exit_code_ok(self): |
|
524 | 522 | self.system('exit 0') |
|
525 | 523 | self.assertEqual(ip.user_ns['_exit_code'], 0) |
|
526 | 524 | |
|
527 | 525 | def test_exit_code_error(self): |
|
528 | 526 | self.system('exit 1') |
|
529 | 527 | self.assertEqual(ip.user_ns['_exit_code'], 1) |
|
530 | 528 | |
|
531 | 529 | @skipif(not hasattr(signal, 'SIGALRM')) |
|
532 | 530 | def test_exit_code_signal(self): |
|
533 | 531 | self.mktmp("import signal, time\n" |
|
534 | 532 | "signal.setitimer(signal.ITIMER_REAL, 0.1)\n" |
|
535 | 533 | "time.sleep(1)\n") |
|
536 | 534 | self.system("%s %s" % (sys.executable, self.fname)) |
|
537 | 535 | self.assertEqual(ip.user_ns['_exit_code'], -signal.SIGALRM) |
|
538 | 536 | |
|
539 | 537 | @onlyif_cmds_exist("csh") |
|
540 | 538 | def test_exit_code_signal_csh(self): |
|
541 | 539 | SHELL = os.environ.get('SHELL', None) |
|
542 | 540 | os.environ['SHELL'] = find_cmd("csh") |
|
543 | 541 | try: |
|
544 | 542 | self.test_exit_code_signal() |
|
545 | 543 | finally: |
|
546 | 544 | if SHELL is not None: |
|
547 | 545 | os.environ['SHELL'] = SHELL |
|
548 | 546 | else: |
|
549 | 547 | del os.environ['SHELL'] |
|
550 | 548 | |
|
551 | 549 | class TestSystemRaw(unittest.TestCase, ExitCodeChecks): |
|
552 | 550 | system = ip.system_raw |
|
553 | 551 | |
|
554 | 552 | @onlyif_unicode_paths |
|
555 | 553 | def test_1(self): |
|
556 | 554 | """Test system_raw with non-ascii cmd |
|
557 | 555 | """ |
|
558 | 556 | cmd = u'''python -c "'åäö'" ''' |
|
559 | 557 | ip.system_raw(cmd) |
|
560 | 558 | |
|
561 | 559 | @mock.patch('subprocess.call', side_effect=KeyboardInterrupt) |
|
562 | 560 | @mock.patch('os.system', side_effect=KeyboardInterrupt) |
|
563 | 561 | def test_control_c(self, *mocks): |
|
564 | 562 | try: |
|
565 | 563 | self.system("sleep 1 # wont happen") |
|
566 | 564 | except KeyboardInterrupt: |
|
567 | 565 | self.fail("system call should intercept " |
|
568 | 566 | "keyboard interrupt from subprocess.call") |
|
569 | 567 | self.assertEqual(ip.user_ns['_exit_code'], -signal.SIGINT) |
|
570 | 568 | |
|
571 | 569 | # TODO: Exit codes are currently ignored on Windows. |
|
572 | 570 | class TestSystemPipedExitCode(unittest.TestCase, ExitCodeChecks): |
|
573 | 571 | system = ip.system_piped |
|
574 | 572 | |
|
575 | 573 | @skip_win32 |
|
576 | 574 | def test_exit_code_ok(self): |
|
577 | 575 | ExitCodeChecks.test_exit_code_ok(self) |
|
578 | 576 | |
|
579 | 577 | @skip_win32 |
|
580 | 578 | def test_exit_code_error(self): |
|
581 | 579 | ExitCodeChecks.test_exit_code_error(self) |
|
582 | 580 | |
|
583 | 581 | @skip_win32 |
|
584 | 582 | def test_exit_code_signal(self): |
|
585 | 583 | ExitCodeChecks.test_exit_code_signal(self) |
|
586 | 584 | |
|
587 | 585 | class TestModules(unittest.TestCase, tt.TempFileMixin): |
|
588 | 586 | def test_extraneous_loads(self): |
|
589 | 587 | """Test we're not loading modules on startup that we shouldn't. |
|
590 | 588 | """ |
|
591 | 589 | self.mktmp("import sys\n" |
|
592 | 590 | "print('numpy' in sys.modules)\n" |
|
593 | 591 | "print('ipyparallel' in sys.modules)\n" |
|
594 | 592 | "print('ipykernel' in sys.modules)\n" |
|
595 | 593 | ) |
|
596 | 594 | out = "False\nFalse\nFalse\n" |
|
597 | 595 | tt.ipexec_validate(self.fname, out) |
|
598 | 596 | |
|
599 | 597 | class Negator(ast.NodeTransformer): |
|
600 | 598 | """Negates all number literals in an AST.""" |
|
601 | 599 | def visit_Num(self, node): |
|
602 | 600 | node.n = -node.n |
|
603 | 601 | return node |
|
604 | 602 | |
|
605 | 603 | class TestAstTransform(unittest.TestCase): |
|
606 | 604 | def setUp(self): |
|
607 | 605 | self.negator = Negator() |
|
608 | 606 | ip.ast_transformers.append(self.negator) |
|
609 | 607 | |
|
610 | 608 | def tearDown(self): |
|
611 | 609 | ip.ast_transformers.remove(self.negator) |
|
612 | 610 | |
|
613 | 611 | def test_run_cell(self): |
|
614 | 612 | with tt.AssertPrints('-34'): |
|
615 | 613 | ip.run_cell('print (12 + 22)') |
|
616 | 614 | |
|
617 | 615 | # A named reference to a number shouldn't be transformed. |
|
618 | 616 | ip.user_ns['n'] = 55 |
|
619 | 617 | with tt.AssertNotPrints('-55'): |
|
620 | 618 | ip.run_cell('print (n)') |
|
621 | 619 | |
|
622 | 620 | def test_timeit(self): |
|
623 | 621 | called = set() |
|
624 | 622 | def f(x): |
|
625 | 623 | called.add(x) |
|
626 | 624 | ip.push({'f':f}) |
|
627 | 625 | |
|
628 | 626 | with tt.AssertPrints("std. dev. of"): |
|
629 | 627 | ip.run_line_magic("timeit", "-n1 f(1)") |
|
630 | 628 | self.assertEqual(called, {-1}) |
|
631 | 629 | called.clear() |
|
632 | 630 | |
|
633 | 631 | with tt.AssertPrints("std. dev. of"): |
|
634 | 632 | ip.run_cell_magic("timeit", "-n1 f(2)", "f(3)") |
|
635 | 633 | self.assertEqual(called, {-2, -3}) |
|
636 | 634 | |
|
637 | 635 | def test_time(self): |
|
638 | 636 | called = [] |
|
639 | 637 | def f(x): |
|
640 | 638 | called.append(x) |
|
641 | 639 | ip.push({'f':f}) |
|
642 | 640 | |
|
643 | 641 | # Test with an expression |
|
644 | 642 | with tt.AssertPrints("Wall time: "): |
|
645 | 643 | ip.run_line_magic("time", "f(5+9)") |
|
646 | 644 | self.assertEqual(called, [-14]) |
|
647 | 645 | called[:] = [] |
|
648 | 646 | |
|
649 | 647 | # Test with a statement (different code path) |
|
650 | 648 | with tt.AssertPrints("Wall time: "): |
|
651 | 649 | ip.run_line_magic("time", "a = f(-3 + -2)") |
|
652 | 650 | self.assertEqual(called, [5]) |
|
653 | 651 | |
|
654 | 652 | def test_macro(self): |
|
655 | 653 | ip.push({'a':10}) |
|
656 | 654 | # The AST transformation makes this do a+=-1 |
|
657 | 655 | ip.define_macro("amacro", "a+=1\nprint(a)") |
|
658 | 656 | |
|
659 | 657 | with tt.AssertPrints("9"): |
|
660 | 658 | ip.run_cell("amacro") |
|
661 | 659 | with tt.AssertPrints("8"): |
|
662 | 660 | ip.run_cell("amacro") |
|
663 | 661 | |
|
664 | 662 | class IntegerWrapper(ast.NodeTransformer): |
|
665 | 663 | """Wraps all integers in a call to Integer()""" |
|
666 | 664 | def visit_Num(self, node): |
|
667 | 665 | if isinstance(node.n, int): |
|
668 | 666 | return ast.Call(func=ast.Name(id='Integer', ctx=ast.Load()), |
|
669 | 667 | args=[node], keywords=[]) |
|
670 | 668 | return node |
|
671 | 669 | |
|
672 | 670 | class TestAstTransform2(unittest.TestCase): |
|
673 | 671 | def setUp(self): |
|
674 | 672 | self.intwrapper = IntegerWrapper() |
|
675 | 673 | ip.ast_transformers.append(self.intwrapper) |
|
676 | 674 | |
|
677 | 675 | self.calls = [] |
|
678 | 676 | def Integer(*args): |
|
679 | 677 | self.calls.append(args) |
|
680 | 678 | return args |
|
681 | 679 | ip.push({"Integer": Integer}) |
|
682 | 680 | |
|
683 | 681 | def tearDown(self): |
|
684 | 682 | ip.ast_transformers.remove(self.intwrapper) |
|
685 | 683 | del ip.user_ns['Integer'] |
|
686 | 684 | |
|
687 | 685 | def test_run_cell(self): |
|
688 | 686 | ip.run_cell("n = 2") |
|
689 | 687 | self.assertEqual(self.calls, [(2,)]) |
|
690 | 688 | |
|
691 | 689 | # This shouldn't throw an error |
|
692 | 690 | ip.run_cell("o = 2.0") |
|
693 | 691 | self.assertEqual(ip.user_ns['o'], 2.0) |
|
694 | 692 | |
|
695 | 693 | def test_timeit(self): |
|
696 | 694 | called = set() |
|
697 | 695 | def f(x): |
|
698 | 696 | called.add(x) |
|
699 | 697 | ip.push({'f':f}) |
|
700 | 698 | |
|
701 | 699 | with tt.AssertPrints("std. dev. of"): |
|
702 | 700 | ip.run_line_magic("timeit", "-n1 f(1)") |
|
703 | 701 | self.assertEqual(called, {(1,)}) |
|
704 | 702 | called.clear() |
|
705 | 703 | |
|
706 | 704 | with tt.AssertPrints("std. dev. of"): |
|
707 | 705 | ip.run_cell_magic("timeit", "-n1 f(2)", "f(3)") |
|
708 | 706 | self.assertEqual(called, {(2,), (3,)}) |
|
709 | 707 | |
|
710 | 708 | class ErrorTransformer(ast.NodeTransformer): |
|
711 | 709 | """Throws an error when it sees a number.""" |
|
712 | 710 | def visit_Num(self, node): |
|
713 | 711 | raise ValueError("test") |
|
714 | 712 | |
|
715 | 713 | class TestAstTransformError(unittest.TestCase): |
|
716 | 714 | def test_unregistering(self): |
|
717 | 715 | err_transformer = ErrorTransformer() |
|
718 | 716 | ip.ast_transformers.append(err_transformer) |
|
719 | 717 | |
|
720 | 718 | with tt.AssertPrints("unregister", channel='stderr'): |
|
721 | 719 | ip.run_cell("1 + 2") |
|
722 | 720 | |
|
723 | 721 | # This should have been removed. |
|
724 | 722 | nt.assert_not_in(err_transformer, ip.ast_transformers) |
|
725 | 723 | |
|
726 | 724 | |
|
727 | 725 | class StringRejector(ast.NodeTransformer): |
|
728 | 726 | """Throws an InputRejected when it sees a string literal. |
|
729 | 727 | |
|
730 | 728 | Used to verify that NodeTransformers can signal that a piece of code should |
|
731 | 729 | not be executed by throwing an InputRejected. |
|
732 | 730 | """ |
|
733 | 731 | |
|
734 | 732 | def visit_Str(self, node): |
|
735 | 733 | raise InputRejected("test") |
|
736 | 734 | |
|
737 | 735 | |
|
738 | 736 | class TestAstTransformInputRejection(unittest.TestCase): |
|
739 | 737 | |
|
740 | 738 | def setUp(self): |
|
741 | 739 | self.transformer = StringRejector() |
|
742 | 740 | ip.ast_transformers.append(self.transformer) |
|
743 | 741 | |
|
744 | 742 | def tearDown(self): |
|
745 | 743 | ip.ast_transformers.remove(self.transformer) |
|
746 | 744 | |
|
747 | 745 | def test_input_rejection(self): |
|
748 | 746 | """Check that NodeTransformers can reject input.""" |
|
749 | 747 | |
|
750 | 748 | expect_exception_tb = tt.AssertPrints("InputRejected: test") |
|
751 | 749 | expect_no_cell_output = tt.AssertNotPrints("'unsafe'", suppress=False) |
|
752 | 750 | |
|
753 | 751 | # Run the same check twice to verify that the transformer is not |
|
754 | 752 | # disabled after raising. |
|
755 | 753 | with expect_exception_tb, expect_no_cell_output: |
|
756 | 754 | ip.run_cell("'unsafe'") |
|
757 | 755 | |
|
758 | 756 | with expect_exception_tb, expect_no_cell_output: |
|
759 | 757 | res = ip.run_cell("'unsafe'") |
|
760 | 758 | |
|
761 | 759 | self.assertIsInstance(res.error_before_exec, InputRejected) |
|
762 | 760 | |
|
763 | 761 | def test__IPYTHON__(): |
|
764 | 762 | # This shouldn't raise a NameError, that's all |
|
765 | 763 | __IPYTHON__ |
|
766 | 764 | |
|
767 | 765 | |
|
768 | 766 | class DummyRepr(object): |
|
769 | 767 | def __repr__(self): |
|
770 | 768 | return "DummyRepr" |
|
771 | 769 | |
|
772 | 770 | def _repr_html_(self): |
|
773 | 771 | return "<b>dummy</b>" |
|
774 | 772 | |
|
775 | 773 | def _repr_javascript_(self): |
|
776 | 774 | return "console.log('hi');", {'key': 'value'} |
|
777 | 775 | |
|
778 | 776 | |
|
779 | 777 | def test_user_variables(): |
|
780 | 778 | # enable all formatters |
|
781 | 779 | ip.display_formatter.active_types = ip.display_formatter.format_types |
|
782 | 780 | |
|
783 | 781 | ip.user_ns['dummy'] = d = DummyRepr() |
|
784 | 782 | keys = {'dummy', 'doesnotexist'} |
|
785 | 783 | r = ip.user_expressions({ key:key for key in keys}) |
|
786 | 784 | |
|
787 | 785 | nt.assert_equal(keys, set(r.keys())) |
|
788 | 786 | dummy = r['dummy'] |
|
789 | 787 | nt.assert_equal({'status', 'data', 'metadata'}, set(dummy.keys())) |
|
790 | 788 | nt.assert_equal(dummy['status'], 'ok') |
|
791 | 789 | data = dummy['data'] |
|
792 | 790 | metadata = dummy['metadata'] |
|
793 | 791 | nt.assert_equal(data.get('text/html'), d._repr_html_()) |
|
794 | 792 | js, jsmd = d._repr_javascript_() |
|
795 | 793 | nt.assert_equal(data.get('application/javascript'), js) |
|
796 | 794 | nt.assert_equal(metadata.get('application/javascript'), jsmd) |
|
797 | 795 | |
|
798 | 796 | dne = r['doesnotexist'] |
|
799 | 797 | nt.assert_equal(dne['status'], 'error') |
|
800 | 798 | nt.assert_equal(dne['ename'], 'NameError') |
|
801 | 799 | |
|
802 | 800 | # back to text only |
|
803 | 801 | ip.display_formatter.active_types = ['text/plain'] |
|
804 | 802 | |
|
805 | 803 | def test_user_expression(): |
|
806 | 804 | # enable all formatters |
|
807 | 805 | ip.display_formatter.active_types = ip.display_formatter.format_types |
|
808 | 806 | query = { |
|
809 | 807 | 'a' : '1 + 2', |
|
810 | 808 | 'b' : '1/0', |
|
811 | 809 | } |
|
812 | 810 | r = ip.user_expressions(query) |
|
813 | 811 | import pprint |
|
814 | 812 | pprint.pprint(r) |
|
815 | 813 | nt.assert_equal(set(r.keys()), set(query.keys())) |
|
816 | 814 | a = r['a'] |
|
817 | 815 | nt.assert_equal({'status', 'data', 'metadata'}, set(a.keys())) |
|
818 | 816 | nt.assert_equal(a['status'], 'ok') |
|
819 | 817 | data = a['data'] |
|
820 | 818 | metadata = a['metadata'] |
|
821 | 819 | nt.assert_equal(data.get('text/plain'), '3') |
|
822 | 820 | |
|
823 | 821 | b = r['b'] |
|
824 | 822 | nt.assert_equal(b['status'], 'error') |
|
825 | 823 | nt.assert_equal(b['ename'], 'ZeroDivisionError') |
|
826 | 824 | |
|
827 | 825 | # back to text only |
|
828 | 826 | ip.display_formatter.active_types = ['text/plain'] |
|
829 | 827 | |
|
830 | 828 | |
|
831 | 829 | |
|
832 | 830 | |
|
833 | 831 | |
|
834 | 832 | class TestSyntaxErrorTransformer(unittest.TestCase): |
|
835 | 833 | """Check that SyntaxError raised by an input transformer is handled by run_cell()""" |
|
836 | 834 | |
|
837 | 835 | class SyntaxErrorTransformer(InputTransformer): |
|
838 | 836 | |
|
839 | 837 | def push(self, line): |
|
840 | 838 | pos = line.find('syntaxerror') |
|
841 | 839 | if pos >= 0: |
|
842 | 840 | e = SyntaxError('input contains "syntaxerror"') |
|
843 | 841 | e.text = line |
|
844 | 842 | e.offset = pos + 1 |
|
845 | 843 | raise e |
|
846 | 844 | return line |
|
847 | 845 | |
|
848 | 846 | def reset(self): |
|
849 | 847 | pass |
|
850 | 848 | |
|
851 | 849 | def setUp(self): |
|
852 | 850 | self.transformer = TestSyntaxErrorTransformer.SyntaxErrorTransformer() |
|
853 | 851 | ip.input_splitter.python_line_transforms.append(self.transformer) |
|
854 | 852 | ip.input_transformer_manager.python_line_transforms.append(self.transformer) |
|
855 | 853 | |
|
856 | 854 | def tearDown(self): |
|
857 | 855 | ip.input_splitter.python_line_transforms.remove(self.transformer) |
|
858 | 856 | ip.input_transformer_manager.python_line_transforms.remove(self.transformer) |
|
859 | 857 | |
|
860 | 858 | def test_syntaxerror_input_transformer(self): |
|
861 | 859 | with tt.AssertPrints('1234'): |
|
862 | 860 | ip.run_cell('1234') |
|
863 | 861 | with tt.AssertPrints('SyntaxError: invalid syntax'): |
|
864 | 862 | ip.run_cell('1 2 3') # plain python syntax error |
|
865 | 863 | with tt.AssertPrints('SyntaxError: input contains "syntaxerror"'): |
|
866 | 864 | ip.run_cell('2345 # syntaxerror') # input transformer syntax error |
|
867 | 865 | with tt.AssertPrints('3456'): |
|
868 | 866 | ip.run_cell('3456') |
|
869 | 867 | |
|
870 | 868 | |
|
871 | 869 | |
|
872 | 870 | def test_warning_suppression(): |
|
873 | 871 | ip.run_cell("import warnings") |
|
874 | 872 | try: |
|
875 | 873 | with tt.AssertPrints("UserWarning: asdf", channel="stderr"): |
|
876 | 874 | ip.run_cell("warnings.warn('asdf')") |
|
877 | 875 | # Here's the real test -- if we run that again, we should get the |
|
878 | 876 | # warning again. Traditionally, each warning was only issued once per |
|
879 | 877 | # IPython session (approximately), even if the user typed in new and |
|
880 | 878 | # different code that should have also triggered the warning, leading |
|
881 | 879 | # to much confusion. |
|
882 | 880 | with tt.AssertPrints("UserWarning: asdf", channel="stderr"): |
|
883 | 881 | ip.run_cell("warnings.warn('asdf')") |
|
884 | 882 | finally: |
|
885 | 883 | ip.run_cell("del warnings") |
|
886 | 884 | |
|
887 | 885 | |
|
888 | 886 | def test_deprecation_warning(): |
|
889 | 887 | ip.run_cell(""" |
|
890 | 888 | import warnings |
|
891 | 889 | def wrn(): |
|
892 | 890 | warnings.warn( |
|
893 | 891 | "I AM A WARNING", |
|
894 | 892 | DeprecationWarning |
|
895 | 893 | ) |
|
896 | 894 | """) |
|
897 | 895 | try: |
|
898 | 896 | with tt.AssertPrints("I AM A WARNING", channel="stderr"): |
|
899 | 897 | ip.run_cell("wrn()") |
|
900 | 898 | finally: |
|
901 | 899 | ip.run_cell("del warnings") |
|
902 | 900 | ip.run_cell("del wrn") |
|
903 | 901 | |
|
904 | 902 | |
|
905 | 903 | class TestImportNoDeprecate(tt.TempFileMixin): |
|
906 | 904 | |
|
907 | 905 | def setup(self): |
|
908 | 906 | """Make a valid python temp file.""" |
|
909 | 907 | self.mktmp(""" |
|
910 | 908 | import warnings |
|
911 | 909 | def wrn(): |
|
912 | 910 | warnings.warn( |
|
913 | 911 | "I AM A WARNING", |
|
914 | 912 | DeprecationWarning |
|
915 | 913 | ) |
|
916 | 914 | """) |
|
917 | 915 | |
|
918 | 916 | def test_no_dep(self): |
|
919 | 917 | """ |
|
920 | 918 | No deprecation warning should be raised from imported functions |
|
921 | 919 | """ |
|
922 | 920 | ip.run_cell("from {} import wrn".format(self.fname)) |
|
923 | 921 | |
|
924 | 922 | with tt.AssertNotPrints("I AM A WARNING"): |
|
925 | 923 | ip.run_cell("wrn()") |
|
926 | 924 | ip.run_cell("del wrn") |
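
As an aside for readers of the custom-exception tests above: the sketch below shows, assuming a live IPython session (so `get_ipython()` returns the active shell), how the `set_custom_exc` hook exercised by `test_custom_exception` is typically wired up and reset; the name `handler` is illustrative only:

    from IPython import get_ipython

    ip = get_ipython()  # assumes this runs inside an IPython session

    def handler(shell, etype, value, tb, tb_offset=None):
        # Record the exception type, then delegate to the normal traceback display.
        print("handled:", etype.__name__)
        shell.showtraceback((etype, value, tb), tb_offset=tb_offset)

    # Route ValueError through the custom handler, mirroring the test above.
    ip.set_custom_exc((ValueError,), handler)
    try:
        ip.run_cell("raise ValueError('demo')")
    finally:
        ip.set_custom_exc((), None)  # reset the hook, as the test's finally block does
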
@@ -1,434 +1,432 b'' | |||
|
1 | 1 | """Tests for the object inspection functionality. |
|
2 | 2 | """ |
|
3 | 3 | |
|
4 | 4 | # Copyright (c) IPython Development Team. |
|
5 | 5 | # Distributed under the terms of the Modified BSD License. |
|
6 | 6 | |
|
7 | 7 | |
|
8 | 8 | from inspect import Signature, Parameter |
|
9 | 9 | import os |
|
10 | 10 | import re |
|
11 | 11 | import sys |
|
12 | 12 | |
|
13 | 13 | import nose.tools as nt |
|
14 | 14 | |
|
15 | 15 | from .. import oinspect |
|
16 | 16 | from IPython.core.magic import (Magics, magics_class, line_magic, |
|
17 | 17 | cell_magic, line_cell_magic, |
|
18 | 18 | register_line_magic, register_cell_magic, |
|
19 | 19 | register_line_cell_magic) |
|
20 | 20 | from decorator import decorator |
|
21 | 21 | from IPython import get_ipython |
|
22 | from IPython.testing.decorators import skipif | |
|
23 | 22 | from IPython.testing.tools import AssertPrints, AssertNotPrints |
|
24 | 23 | from IPython.utils.path import compress_user |
|
25 | from IPython.utils import py3compat | |
|
26 | 24 | |
|
27 | 25 | |
|
28 | 26 | #----------------------------------------------------------------------------- |
|
29 | 27 | # Globals and constants |
|
30 | 28 | #----------------------------------------------------------------------------- |
|
31 | 29 | |
|
32 | 30 | inspector = oinspect.Inspector() |
|
33 | 31 | ip = get_ipython() |
|
34 | 32 | |
|
35 | 33 | #----------------------------------------------------------------------------- |
|
36 | 34 | # Local utilities |
|
37 | 35 | #----------------------------------------------------------------------------- |
|
38 | 36 | |
|
39 | 37 | # WARNING: since this test checks the line number where a function is |
|
40 | 38 | # defined, if any code is inserted above, the following line will need to be |
|
41 | 39 | # updated. Do NOT insert any whitespace between the next line and the function |
|
42 | 40 | # definition below. |
|
43 | THIS_LINE_NUMBER = 43 # Put here the actual number of this line |

41 | THIS_LINE_NUMBER = 41 # Put here the actual number of this line |
|
44 | 42 | |
|
45 | 43 | from unittest import TestCase |
|
46 | 44 | |
|
47 | 45 | class Test(TestCase): |
|
48 | 46 | |
|
49 | 47 | def test_find_source_lines(self): |
|
50 |
self.assertEqual(oinspect.find_source_lines(Test.test_find_source_lines), |
|
|
48 | self.assertEqual(oinspect.find_source_lines(Test.test_find_source_lines), | |
|
51 | 49 | THIS_LINE_NUMBER+6) |
|
52 | 50 | |
|
53 | 51 | |
|
54 | 52 | # A couple of utilities to ensure these tests work the same from a source or a |
|
55 | 53 | # binary install |
|
56 | 54 | def pyfile(fname): |
|
57 | 55 | return os.path.normcase(re.sub('.py[co]$', '.py', fname)) |
|
58 | 56 | |
|
59 | 57 | |
|
60 | 58 | def match_pyfiles(f1, f2): |
|
61 | 59 | nt.assert_equal(pyfile(f1), pyfile(f2)) |
|
62 | 60 | |
|
63 | 61 | |
|
64 | 62 | def test_find_file(): |
|
65 | 63 | match_pyfiles(oinspect.find_file(test_find_file), os.path.abspath(__file__)) |
|
66 | 64 | |
|
67 | 65 | |
|
68 | 66 | def test_find_file_decorated1(): |
|
69 | 67 | |
|
70 | 68 | @decorator |
|
71 | 69 | def noop1(f): |
|
72 | 70 | def wrapper(*a, **kw): |
|
73 | 71 | return f(*a, **kw) |
|
74 | 72 | return wrapper |
|
75 | 73 | |
|
76 | 74 | @noop1 |
|
77 | 75 | def f(x): |
|
78 | 76 | "My docstring" |
|
79 | 77 | |
|
80 | 78 | match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__)) |
|
81 | 79 | nt.assert_equal(f.__doc__, "My docstring") |
|
82 | 80 | |
|
83 | 81 | |
|
84 | 82 | def test_find_file_decorated2(): |
|
85 | 83 | |
|
86 | 84 | @decorator |
|
87 | 85 | def noop2(f, *a, **kw): |
|
88 | 86 | return f(*a, **kw) |
|
89 | 87 | |
|
90 | 88 | @noop2 |
|
91 | 89 | @noop2 |
|
92 | 90 | @noop2 |
|
93 | 91 | def f(x): |
|
94 | 92 | "My docstring 2" |
|
95 | 93 | |
|
96 | 94 | match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__)) |
|
97 | 95 | nt.assert_equal(f.__doc__, "My docstring 2") |
|
98 | 96 | |
|
99 | 97 | |
|
100 | 98 | def test_find_file_magic(): |
|
101 | 99 | run = ip.find_line_magic('run') |
|
102 | 100 | nt.assert_not_equal(oinspect.find_file(run), None) |
|
103 | 101 | |
|
104 | 102 | |
|
105 | 103 | # A few generic objects we can then inspect in the tests below |
|
106 | 104 | |
|
107 | 105 | class Call(object): |
|
108 | 106 | """This is the class docstring.""" |
|
109 | 107 | |
|
110 | 108 | def __init__(self, x, y=1): |
|
111 | 109 | """This is the constructor docstring.""" |
|
112 | 110 | |
|
113 | 111 | def __call__(self, *a, **kw): |
|
114 | 112 | """This is the call docstring.""" |
|
115 | 113 | |
|
116 | 114 | def method(self, x, z=2): |
|
117 | 115 | """Some method's docstring""" |
|
118 | 116 | |
|
119 | 117 | class HasSignature(object): |
|
120 | 118 | """This is the class docstring.""" |
|
121 | 119 | __signature__ = Signature([Parameter('test', Parameter.POSITIONAL_OR_KEYWORD)]) |
|
122 | 120 | |
|
123 | 121 | def __init__(self, *args): |
|
124 | 122 | """This is the init docstring""" |
|
125 | 123 | |
|
126 | 124 | |
|
127 | 125 | class SimpleClass(object): |
|
128 | 126 | def method(self, x, z=2): |
|
129 | 127 | """Some method's docstring""" |
|
130 | 128 | |
|
131 | 129 | |
|
132 | 130 | class OldStyle: |
|
133 | 131 | """An old-style class for testing.""" |
|
134 | 132 | pass |
|
135 | 133 | |
|
136 | 134 | |
|
137 | 135 | def f(x, y=2, *a, **kw): |
|
138 | 136 | """A simple function.""" |
|
139 | 137 | |
|
140 | 138 | |
|
141 | 139 | def g(y, z=3, *a, **kw): |
|
142 | 140 | pass # no docstring |
|
143 | 141 | |
|
144 | 142 | |
|
145 | 143 | @register_line_magic |
|
146 | 144 | def lmagic(line): |
|
147 | 145 | "A line magic" |
|
148 | 146 | |
|
149 | 147 | |
|
150 | 148 | @register_cell_magic |
|
151 | 149 | def cmagic(line, cell): |
|
152 | 150 | "A cell magic" |
|
153 | 151 | |
|
154 | 152 | |
|
155 | 153 | @register_line_cell_magic |
|
156 | 154 | def lcmagic(line, cell=None): |
|
157 | 155 | "A line/cell magic" |
|
158 | 156 | |
|
159 | 157 | |
|
160 | 158 | @magics_class |
|
161 | 159 | class SimpleMagics(Magics): |
|
162 | 160 | @line_magic |
|
163 | 161 | def Clmagic(self, cline): |
|
164 | 162 | "A class-based line magic" |
|
165 | 163 | |
|
166 | 164 | @cell_magic |
|
167 | 165 | def Ccmagic(self, cline, ccell): |
|
168 | 166 | "A class-based cell magic" |
|
169 | 167 | |
|
170 | 168 | @line_cell_magic |
|
171 | 169 | def Clcmagic(self, cline, ccell=None): |
|
172 | 170 | "A class-based line/cell magic" |
|
173 | 171 | |
|
174 | 172 | |
|
175 | 173 | class Awkward(object): |
|
176 | 174 | def __getattr__(self, name): |
|
177 | 175 | raise Exception(name) |
|
178 | 176 | |
|
179 | 177 | class NoBoolCall: |
|
180 | 178 | """ |
|
181 | 179 | callable with `__bool__` raising should still be inspect-able. |
|
182 | 180 | """ |
|
183 | 181 | |
|
184 | 182 | def __call__(self): |
|
185 | 183 | """does nothing""" |
|
186 | 184 | pass |
|
187 | 185 | |
|
188 | 186 | def __bool__(self): |
|
189 | 187 | """just raise NotImplemented""" |
|
190 | 188 | raise NotImplementedError('Must be implemented') |
|
191 | 189 | |
|
192 | 190 | |
|
193 | 191 | class SerialLiar(object): |
|
194 | 192 | """Attribute accesses always get another copy of the same class. |
|
195 | 193 | |
|
196 | 194 | unittest.mock.call does something similar, but it's not ideal for testing |
|
197 | 195 | as the failure mode is to eat all your RAM. This gives up after 10k levels. |
|
198 | 196 | """ |
|
199 | 197 | def __init__(self, max_fibbing_twig, lies_told=0): |
|
200 | 198 | if lies_told > 10000: |
|
201 | 199 | raise RuntimeError('Nose too long, honesty is the best policy') |
|
202 | 200 | self.max_fibbing_twig = max_fibbing_twig |
|
203 | 201 | self.lies_told = lies_told |
|
204 | 202 | max_fibbing_twig[0] = max(max_fibbing_twig[0], lies_told) |
|
205 | 203 | |
|
206 | 204 | def __getattr__(self, item): |
|
207 | 205 | return SerialLiar(self.max_fibbing_twig, self.lies_told + 1) |
|
208 | 206 | |
|
209 | 207 | #----------------------------------------------------------------------------- |
|
210 | 208 | # Tests |
|
211 | 209 | #----------------------------------------------------------------------------- |
|
212 | 210 | |
|
213 | 211 | def test_info(): |
|
214 | 212 | "Check that Inspector.info fills out various fields as expected." |
|
215 | 213 | i = inspector.info(Call, oname='Call') |
|
216 | 214 | nt.assert_equal(i['type_name'], 'type') |
|
217 | 215 | expted_class = str(type(type)) # <class 'type'> (Python 3) or <type 'type'> |
|
218 | 216 | nt.assert_equal(i['base_class'], expted_class) |
|
219 | 217 | nt.assert_regex(i['string_form'], "<class 'IPython.core.tests.test_oinspect.Call'( at 0x[0-9a-f]{1,9})?>") |
|
220 | 218 | fname = __file__ |
|
221 | 219 | if fname.endswith(".pyc"): |
|
222 | 220 | fname = fname[:-1] |
|
223 | 221 | # case-insensitive comparison needed on some filesystems |
|
224 | 222 | # e.g. Windows: |
|
225 | 223 | nt.assert_equal(i['file'].lower(), compress_user(fname).lower()) |
|
226 | 224 | nt.assert_equal(i['definition'], None) |
|
227 | 225 | nt.assert_equal(i['docstring'], Call.__doc__) |
|
228 | 226 | nt.assert_equal(i['source'], None) |
|
229 | 227 | nt.assert_true(i['isclass']) |
|
230 | 228 | nt.assert_equal(i['init_definition'], "Call(x, y=1)") |
|
231 | 229 | nt.assert_equal(i['init_docstring'], Call.__init__.__doc__) |
|
232 | 230 | |
|
233 | 231 | i = inspector.info(Call, detail_level=1) |
|
234 | 232 | nt.assert_not_equal(i['source'], None) |
|
235 | 233 | nt.assert_equal(i['docstring'], None) |
|
236 | 234 | |
|
237 | 235 | c = Call(1) |
|
238 | 236 | c.__doc__ = "Modified instance docstring" |
|
239 | 237 | i = inspector.info(c) |
|
240 | 238 | nt.assert_equal(i['type_name'], 'Call') |
|
241 | 239 | nt.assert_equal(i['docstring'], "Modified instance docstring") |
|
242 | 240 | nt.assert_equal(i['class_docstring'], Call.__doc__) |
|
243 | 241 | nt.assert_equal(i['init_docstring'], Call.__init__.__doc__) |
|
244 | 242 | nt.assert_equal(i['call_docstring'], Call.__call__.__doc__) |
|
245 | 243 | |
|
246 | 244 | def test_class_signature(): |
|
247 | 245 | info = inspector.info(HasSignature, 'HasSignature') |
|
248 | 246 | nt.assert_equal(info['init_definition'], "HasSignature(test)") |
|
249 | 247 | nt.assert_equal(info['init_docstring'], HasSignature.__init__.__doc__) |
|
250 | 248 | |
|
251 | 249 | def test_info_awkward(): |
|
252 | 250 | # Just test that this doesn't throw an error. |
|
253 | 251 | inspector.info(Awkward()) |
|
254 | 252 | |
|
255 | 253 | def test_bool_raise(): |
|
256 | 254 | inspector.info(NoBoolCall()) |
|
257 | 255 | |
|
258 | 256 | def test_info_serialliar(): |
|
259 | 257 | fib_tracker = [0] |
|
260 | 258 | inspector.info(SerialLiar(fib_tracker)) |
|
261 | 259 | |
|
262 | 260 | # Nested attribute access should be cut off at 100 levels deep to avoid |
|
263 | 261 | # infinite loops: https://github.com/ipython/ipython/issues/9122 |
|
264 | 262 | nt.assert_less(fib_tracker[0], 9000) |
|
265 | 263 | |
|
266 | 264 | def test_calldef_none(): |
|
267 | 265 | # We should ignore __call__ for all of these. |
|
268 | 266 | for obj in [f, SimpleClass().method, any, str.upper]: |
|
269 | 267 | print(obj) |
|
270 | 268 | i = inspector.info(obj) |
|
271 | 269 | nt.assert_is(i['call_def'], None) |
|
272 | 270 | |
|
273 | 271 | def f_kwarg(pos, *, kwonly): |
|
274 | 272 | pass |
|
275 | 273 | |
|
276 | 274 | def test_definition_kwonlyargs(): |
|
277 | 275 | i = inspector.info(f_kwarg, oname='f_kwarg') # analysis:ignore |
|
278 | 276 | nt.assert_equal(i['definition'], "f_kwarg(pos, *, kwonly)") |
|
279 | 277 | |
|
280 | 278 | def test_getdoc(): |
|
281 | 279 | class A(object): |
|
282 | 280 | """standard docstring""" |
|
283 | 281 | pass |
|
284 | 282 | |
|
285 | 283 | class B(object): |
|
286 | 284 | """standard docstring""" |
|
287 | 285 | def getdoc(self): |
|
288 | 286 | return "custom docstring" |
|
289 | 287 | |
|
290 | 288 | class C(object): |
|
291 | 289 | """standard docstring""" |
|
292 | 290 | def getdoc(self): |
|
293 | 291 | return None |
|
294 | 292 | |
|
295 | 293 | a = A() |
|
296 | 294 | b = B() |
|
297 | 295 | c = C() |
|
298 | 296 | |
|
299 | 297 | nt.assert_equal(oinspect.getdoc(a), "standard docstring") |
|
300 | 298 | nt.assert_equal(oinspect.getdoc(b), "custom docstring") |
|
301 | 299 | nt.assert_equal(oinspect.getdoc(c), "standard docstring") |
|
302 | 300 | |
|
303 | 301 | |
|
304 | 302 | def test_empty_property_has_no_source(): |
|
305 | 303 | i = inspector.info(property(), detail_level=1) |
|
306 | 304 | nt.assert_is(i['source'], None) |
|
307 | 305 | |
|
308 | 306 | |
|
309 | 307 | def test_property_sources(): |
|
310 | 308 | import zlib |
|
311 | 309 | |
|
312 | 310 | class A(object): |
|
313 | 311 | @property |
|
314 | 312 | def foo(self): |
|
315 | 313 | return 'bar' |
|
316 | 314 | |
|
317 | 315 | foo = foo.setter(lambda self, v: setattr(self, 'bar', v)) |
|
318 | 316 | |
|
319 | 317 | id = property(id) |
|
320 | 318 | compress = property(zlib.compress) |
|
321 | 319 | |
|
322 | 320 | i = inspector.info(A.foo, detail_level=1) |
|
323 | 321 | nt.assert_in('def foo(self):', i['source']) |
|
324 | 322 | nt.assert_in('lambda self, v:', i['source']) |
|
325 | 323 | |
|
326 | 324 | i = inspector.info(A.id, detail_level=1) |
|
327 | 325 | nt.assert_in('fget = <function id>', i['source']) |
|
328 | 326 | |
|
329 | 327 | i = inspector.info(A.compress, detail_level=1) |
|
330 | 328 | nt.assert_in('fget = <function zlib.compress>', i['source']) |
|
331 | 329 | |
|
332 | 330 | |
|
333 | 331 | def test_property_docstring_is_in_info_for_detail_level_0(): |
|
334 | 332 | class A(object): |
|
335 | 333 | @property |
|
336 | 334 | def foobar(self): |
|
337 | 335 | """This is `foobar` property.""" |
|
338 | 336 | pass |
|
339 | 337 | |
|
340 | 338 | ip.user_ns['a_obj'] = A() |
|
341 | 339 | nt.assert_equal( |
|
342 | 340 | 'This is `foobar` property.', |
|
343 | 341 | ip.object_inspect('a_obj.foobar', detail_level=0)['docstring']) |
|
344 | 342 | |
|
345 | 343 | ip.user_ns['a_cls'] = A |
|
346 | 344 | nt.assert_equal( |
|
347 | 345 | 'This is `foobar` property.', |
|
348 | 346 | ip.object_inspect('a_cls.foobar', detail_level=0)['docstring']) |
|
349 | 347 | |
|
350 | 348 | |
|
351 | 349 | def test_pdef(): |
|
352 | 350 | # See gh-1914 |
|
353 | 351 | def foo(): pass |
|
354 | 352 | inspector.pdef(foo, 'foo') |
|
355 | 353 | |
|
356 | 354 | |
|
357 | 355 | def test_pinfo_nonascii(): |
|
358 | 356 | # See gh-1177 |
|
359 | 357 | from . import nonascii2 |
|
360 | 358 | ip.user_ns['nonascii2'] = nonascii2 |
|
361 | 359 | ip._inspect('pinfo', 'nonascii2', detail_level=1) |
|
362 | 360 | |
|
363 | 361 | |
|
364 | 362 | def test_pinfo_docstring_no_source(): |
|
365 | 363 | """Docstring should be included with detail_level=1 if there is no source""" |
|
366 | 364 | with AssertPrints('Docstring:'): |
|
367 | 365 | ip._inspect('pinfo', 'str.format', detail_level=0) |
|
368 | 366 | with AssertPrints('Docstring:'): |
|
369 | 367 | ip._inspect('pinfo', 'str.format', detail_level=1) |
|
370 | 368 | |
|
371 | 369 | |
|
372 | 370 | def test_pinfo_no_docstring_if_source(): |
|
373 | 371 | """Docstring should not be included with detail_level=1 if source is found""" |
|
374 | 372 | def foo(): |
|
375 | 373 | """foo has a docstring""" |
|
376 | 374 | |
|
377 | 375 | ip.user_ns['foo'] = foo |
|
378 | 376 | |
|
379 | 377 | with AssertPrints('Docstring:'): |
|
380 | 378 | ip._inspect('pinfo', 'foo', detail_level=0) |
|
381 | 379 | with AssertPrints('Source:'): |
|
382 | 380 | ip._inspect('pinfo', 'foo', detail_level=1) |
|
383 | 381 | with AssertNotPrints('Docstring:'): |
|
384 | 382 | ip._inspect('pinfo', 'foo', detail_level=1) |
|
385 | 383 | |
|
386 | 384 | |
|
387 | 385 | def test_pinfo_docstring_if_detail_and_no_source(): |
|
388 | 386 | """ Docstring should be displayed if source info not available """ |
|
389 | 387 | obj_def = '''class Foo(object): |
|
390 | 388 | """ This is a docstring for Foo """ |
|
391 | 389 | def bar(self): |
|
392 | 390 | """ This is a docstring for Foo.bar """ |
|
393 | 391 | pass |
|
394 | 392 | ''' |
|
395 | 393 | |
|
396 | 394 | ip.run_cell(obj_def) |
|
397 | 395 | ip.run_cell('foo = Foo()') |
|
398 | 396 | |
|
399 | 397 | with AssertNotPrints("Source:"): |
|
400 | 398 | with AssertPrints('Docstring:'): |
|
401 | 399 | ip._inspect('pinfo', 'foo', detail_level=0) |
|
402 | 400 | with AssertPrints('Docstring:'): |
|
403 | 401 | ip._inspect('pinfo', 'foo', detail_level=1) |
|
404 | 402 | with AssertPrints('Docstring:'): |
|
405 | 403 | ip._inspect('pinfo', 'foo.bar', detail_level=0) |
|
406 | 404 | |
|
407 | 405 | with AssertNotPrints('Docstring:'): |
|
408 | 406 | with AssertPrints('Source:'): |
|
409 | 407 | ip._inspect('pinfo', 'foo.bar', detail_level=1) |
|
410 | 408 | |
|
411 | 409 | |
|
412 | 410 | def test_pinfo_magic(): |
|
413 | 411 | with AssertPrints('Docstring:'): |
|
414 | 412 | ip._inspect('pinfo', 'lsmagic', detail_level=0) |
|
415 | 413 | |
|
416 | 414 | with AssertPrints('Source:'): |
|
417 | 415 | ip._inspect('pinfo', 'lsmagic', detail_level=1) |
|
418 | 416 | |
|
419 | 417 | |
|
420 | 418 | def test_init_colors(): |
|
421 | 419 | # ensure colors are not present in signature info |
|
422 | 420 | info = inspector.info(HasSignature) |
|
423 | 421 | init_def = info['init_definition'] |
|
424 | 422 | nt.assert_not_in('[0m', init_def) |
|
425 | 423 | |
|
426 | 424 | |
|
427 | 425 | def test_builtin_init(): |
|
428 | 426 | info = inspector.info(list) |
|
429 | 427 | init_def = info['init_definition'] |
|
430 | 428 | # Python < 3.4 can't get init definition from builtins, |
|
431 | 429 | # but still exercise the inspection in case of error-raising bugs. |
|
432 | 430 | if sys.version_info >= (3,4): |
|
433 | 431 | nt.assert_is_not_none(init_def) |
|
434 | 432 |
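
Before the next file, a brief sketch of the inspector API these tests exercise; `greet` is a made-up function and the commented values are indicative rather than guaranteed output:

    from IPython.core import oinspect

    inspector = oinspect.Inspector()

    def greet(name, excited=False):
        """Return a greeting."""
        return "Hello, " + name + ("!" if excited else ".")

    # info() returns a plain dict with the fields asserted on above:
    # 'definition', 'docstring', 'file', 'isclass', and (with detail_level=1) 'source'.
    info = inspector.info(greet, oname="greet")
    print(info["definition"])   # e.g. greet(name, excited=False)
    print(info["docstring"])    # Return a greeting.

    detailed = inspector.info(greet, detail_level=1)
    print(detailed["source"] is not None)   # True: source is available for this function
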
@@ -1,39 +1,38 b'' | |||
|
1 | 1 | # coding: utf-8 |
|
2 | 2 | import nose.tools as nt |
|
3 | 3 | |
|
4 | 4 | from IPython.core.splitinput import split_user_input, LineInfo |
|
5 | 5 | from IPython.testing import tools as tt |
|
6 | from IPython.utils import py3compat | |
|
7 | 6 | |
|
8 | 7 | tests = [ |
|
9 | 8 | ('x=1', ('', '', 'x', '=1')), |
|
10 | 9 | ('?', ('', '?', '', '')), |
|
11 | 10 | ('??', ('', '??', '', '')), |
|
12 | 11 | (' ?', (' ', '?', '', '')), |
|
13 | 12 | (' ??', (' ', '??', '', '')), |
|
14 | 13 | ('??x', ('', '??', 'x', '')), |
|
15 | 14 | ('?x=1', ('', '?', 'x', '=1')), |
|
16 | 15 | ('!ls', ('', '!', 'ls', '')), |
|
17 | 16 | (' !ls', (' ', '!', 'ls', '')), |
|
18 | 17 | ('!!ls', ('', '!!', 'ls', '')), |
|
19 | 18 | (' !!ls', (' ', '!!', 'ls', '')), |
|
20 | 19 | (',ls', ('', ',', 'ls', '')), |
|
21 | 20 | (';ls', ('', ';', 'ls', '')), |
|
22 | 21 | (' ;ls', (' ', ';', 'ls', '')), |
|
23 | 22 | ('f.g(x)', ('', '', 'f.g', '(x)')), |
|
24 | 23 | ('f.g (x)', ('', '', 'f.g', '(x)')), |
|
25 | 24 | ('?%hist1', ('', '?', '%hist1', '')), |
|
26 | 25 | ('?%%hist2', ('', '?', '%%hist2', '')), |
|
27 | 26 | ('??%hist3', ('', '??', '%hist3', '')), |
|
28 | 27 | ('??%%hist4', ('', '??', '%%hist4', '')), |
|
29 | 28 | ('?x*', ('', '?', 'x*', '')), |
|
30 | 29 | ] |
|
31 | 30 | tests.append((u"Pérez Fernando", (u'', u'', u'Pérez', u'Fernando'))) |
|
32 | 31 | |
|
33 | 32 | def test_split_user_input(): |
|
34 | 33 | return tt.check_pairs(split_user_input, tests) |
|
35 | 34 | |
|
36 | 35 | def test_LineInfo(): |
|
37 | 36 | """Simple test for LineInfo construction and str()""" |
|
38 | 37 | linfo = LineInfo(' %cd /home') |
|
39 | 38 | nt.assert_equal(str(linfo), 'LineInfo [ |%|cd|/home]') |
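
A small usage sketch of the splitter these tests cover, with the expected values taken directly from the `tests` table above:

    from IPython.core.splitinput import split_user_input, LineInfo

    # split_user_input breaks a raw input line into (pre, esc, ifun, the_rest).
    print(split_user_input('?x=1'))   # ('', '?', 'x', '=1')
    print(split_user_input(' !!ls'))  # (' ', '!!', 'ls', '')

    # LineInfo wraps the same split and renders it for debugging.
    linfo = LineInfo(' %cd /home')
    print(str(linfo))                 # LineInfo [ |%|cd|/home]
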
@@ -1,1464 +1,1461 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """ |
|
3 | 3 | Verbose and colourful traceback formatting. |
|
4 | 4 | |
|
5 | 5 | **ColorTB** |
|
6 | 6 | |
|
7 | 7 | I've always found it a bit hard to visually parse tracebacks in Python. The |
|
8 | 8 | ColorTB class is a solution to that problem. It colors the different parts of a |
|
9 | 9 | traceback in a manner similar to what you would expect from a syntax-highlighting |
|
10 | 10 | text editor. |
|
11 | 11 | |
|
12 | 12 | Installation instructions for ColorTB:: |
|
13 | 13 | |
|
14 | 14 | import sys,ultratb |
|
15 | 15 | sys.excepthook = ultratb.ColorTB() |
|
16 | 16 | |
|
17 | 17 | **VerboseTB** |
|
18 | 18 | |
|
19 | 19 | I've also included a port of Ka-Ping Yee's "cgitb.py" that produces all kinds |
|
20 | 20 | of useful info when a traceback occurs. Ping originally had it spit out HTML |
|
21 | 21 | and intended it for CGI programmers, but why should they have all the fun? I |
|
22 | 22 | altered it to spit out colored text to the terminal. It's a bit overwhelming, |
|
23 | 23 | but kind of neat, and maybe useful for long-running programs that you believe |
|
24 | 24 | are bug-free. If a crash *does* occur in that type of program you want details. |
|
25 | 25 | Give it a shot--you'll love it or you'll hate it. |
|
26 | 26 | |
|
27 | 27 | .. note:: |
|
28 | 28 | |
|
29 | 29 | The Verbose mode prints the variables currently visible where the exception |
|
30 | 30 | happened (shortening their strings if too long). This can potentially be |
|
31 | 31 | very slow, if you happen to have a huge data structure whose string |
|
32 | 32 | representation is complex to compute. Your computer may appear to freeze for |
|
33 | 33 | a while with cpu usage at 100%. If this occurs, you can cancel the traceback |
|
34 | 34 | with Ctrl-C (maybe hitting it more than once). |
|
35 | 35 | |
|
36 | 36 | If you encounter this kind of situation often, you may want to use the |
|
37 | 37 | Verbose_novars mode instead of the regular Verbose, which avoids formatting |
|
38 | 38 | variables (but otherwise includes the information and context given by |
|
39 | 39 | Verbose). |
|
40 | 40 | |
|
41 | 41 | .. note:: |
|
42 | 42 | |
|
43 | 43 | The verbose mode prints all variables in the stack, which means it can |

44 | 44 | potentially leak sensitive information, like access keys or unencrypted |

45 | 45 | passwords. |
|
46 | 46 | |
|
47 | 47 | Installation instructions for VerboseTB:: |
|
48 | 48 | |
|
49 | 49 | import sys,ultratb |
|
50 | 50 | sys.excepthook = ultratb.VerboseTB() |
|
51 | 51 | |
|
52 | 52 | Note: Much of the code in this module was lifted verbatim from the standard |
|
53 | 53 | library module 'traceback.py' and Ka-Ping Yee's 'cgitb.py'. |
|
54 | 54 | |
|
55 | 55 | Color schemes |
|
56 | 56 | ------------- |
|
57 | 57 | |
|
58 | 58 | The colors are defined in the class TBTools through the use of the |
|
59 | 59 | ColorSchemeTable class. Currently the following exist: |
|
60 | 60 | |
|
61 | 61 | - NoColor: allows all of this module to be used in any terminal (the color |
|
62 | 62 | escapes are just dummy blank strings). |
|
63 | 63 | |
|
64 | 64 | - Linux: is meant to look good in a terminal like the Linux console (black |
|
65 | 65 | or very dark background). |
|
66 | 66 | |
|
67 | 67 | - LightBG: similar to Linux but swaps dark/light colors to be more readable |
|
68 | 68 | in light background terminals. |
|
69 | 69 | |
|
70 | 70 | - Neutral: a neutral color scheme that should be readable on both light and |
|
71 | 71 | dark background |
|
72 | 72 | |
|
73 | 73 | You can implement other color schemes easily; the syntax is fairly |
|
74 | 74 | self-explanatory. Please send back new schemes you develop to the author for |
|
75 | 75 | possible inclusion in future releases. |
|
76 | 76 | |
|
77 | 77 | Inheritance diagram: |
|
78 | 78 | |
|
79 | 79 | .. inheritance-diagram:: IPython.core.ultratb |
|
80 | 80 | :parts: 3 |
|
81 | 81 | """ |
|
82 | 82 | |
|
83 | 83 | #***************************************************************************** |
|
84 | 84 | # Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu> |
|
85 | 85 | # Copyright (C) 2001-2004 Fernando Perez <fperez@colorado.edu> |
|
86 | 86 | # |
|
87 | 87 | # Distributed under the terms of the BSD License. The full license is in |
|
88 | 88 | # the file COPYING, distributed as part of this software. |
|
89 | 89 | #***************************************************************************** |
|
90 | 90 | |
|
91 | 91 | |
|
92 | 92 | import dis |
|
93 | 93 | import inspect |
|
94 | 94 | import keyword |
|
95 | 95 | import linecache |
|
96 | 96 | import os |
|
97 | 97 | import pydoc |
|
98 | 98 | import re |
|
99 | 99 | import sys |
|
100 | 100 | import time |
|
101 | 101 | import tokenize |
|
102 | 102 | import traceback |
|
103 | 103 | |
|
104 | 104 | try: # Python 2 |
|
105 | 105 | generate_tokens = tokenize.generate_tokens |
|
106 | 106 | except AttributeError: # Python 3 |
|
107 | 107 | generate_tokens = tokenize.tokenize |
|
108 | 108 | |
|
109 | 109 | # For purposes of monkeypatching inspect to fix a bug in it. |
|
110 | 110 | from inspect import getsourcefile, getfile, getmodule, \ |
|
111 | 111 | ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode |
|
112 | 112 | |
|
113 | 113 | # IPython's own modules |
|
114 | 114 | from IPython import get_ipython |
|
115 | 115 | from IPython.core import debugger |
|
116 | 116 | from IPython.core.display_trap import DisplayTrap |
|
117 | 117 | from IPython.core.excolors import exception_colors |
|
118 | 118 | from IPython.utils import PyColorize |
|
119 | 119 | from IPython.utils import openpy |
|
120 | 120 | from IPython.utils import path as util_path |
|
121 | 121 | from IPython.utils import py3compat |
|
122 | 122 | from IPython.utils.data import uniq_stable |
|
123 | 123 | from IPython.utils.terminal import get_terminal_size |
|
124 | 124 | from logging import info, error, debug |
|
125 | 125 | |
|
126 | 126 | import IPython.utils.colorable as colorable |
|
127 | 127 | |
|
128 | 128 | # Globals |
|
129 | 129 | # amount of space to put line numbers before verbose tracebacks |
|
130 | 130 | INDENT_SIZE = 8 |
|
131 | 131 | |
|
132 | 132 | # Default color scheme. This is used, for example, by the traceback |
|
133 | 133 | # formatter. When running in an actual IPython instance, the user's rc.colors |
|
134 | 134 | # value is used, but having a module global makes this functionality available |
|
135 | 135 | # to users of ultratb who are NOT running inside ipython. |
|
136 | 136 | DEFAULT_SCHEME = 'NoColor' |
|
137 | 137 | |
|
138 | 138 | # --------------------------------------------------------------------------- |
|
139 | 139 | # Code begins |
|
140 | 140 | |
|
141 | 141 | # Utility functions |
|
142 | 142 | def inspect_error(): |
|
143 | 143 | """Print a message about internal inspect errors. |
|
144 | 144 | |
|
145 | 145 | These are unfortunately quite common.""" |
|
146 | 146 | |
|
147 | 147 | error('Internal Python error in the inspect module.\n' |
|
148 | 148 | 'Below is the traceback from this internal error.\n') |
|
149 | 149 | |
|
150 | 150 | |
|
151 | 151 | # This function is a monkeypatch we apply to the Python inspect module. We have |
|
152 | 152 | # now found when it's needed (see discussion on issue gh-1456), and we have a |
|
153 | 153 | # test case (IPython.core.tests.test_ultratb.ChangedPyFileTest) that fails if |
|
154 | 154 | # the monkeypatch is not applied. TK, Aug 2012. |
|
155 | 155 | def findsource(object): |
|
156 | 156 | """Return the entire source file and starting line number for an object. |
|
157 | 157 | |
|
158 | 158 | The argument may be a module, class, method, function, traceback, frame, |
|
159 | 159 | or code object. The source code is returned as a list of all the lines |
|
160 | 160 | in the file and the line number indexes a line in that list. An IOError |
|
161 | 161 | is raised if the source code cannot be retrieved. |
|
162 | 162 | |
|
163 | 163 | FIXED version with which we monkeypatch the stdlib to work around a bug.""" |
|
164 | 164 | |
|
165 | 165 | file = getsourcefile(object) or getfile(object) |
|
166 | 166 | # If the object is a frame, then trying to get the globals dict from its |
|
167 | 167 | # module won't work. Instead, the frame object itself has the globals |
|
168 | 168 | # dictionary. |
|
169 | 169 | globals_dict = None |
|
170 | 170 | if inspect.isframe(object): |
|
171 | 171 | # XXX: can this ever be false? |
|
172 | 172 | globals_dict = object.f_globals |
|
173 | 173 | else: |
|
174 | 174 | module = getmodule(object, file) |
|
175 | 175 | if module: |
|
176 | 176 | globals_dict = module.__dict__ |
|
177 | 177 | lines = linecache.getlines(file, globals_dict) |
|
178 | 178 | if not lines: |
|
179 | 179 | raise IOError('could not get source code') |
|
180 | 180 | |
|
181 | 181 | if ismodule(object): |
|
182 | 182 | return lines, 0 |
|
183 | 183 | |
|
184 | 184 | if isclass(object): |
|
185 | 185 | name = object.__name__ |
|
186 | 186 | pat = re.compile(r'^(\s*)class\s*' + name + r'\b') |
|
187 | 187 | # make some effort to find the best matching class definition: |
|
188 | 188 | # use the one with the least indentation, which is the one |
|
189 | 189 | # that's most probably not inside a function definition. |
|
190 | 190 | candidates = [] |
|
191 | 191 | for i, line in enumerate(lines): |
|
192 | 192 | match = pat.match(line) |
|
193 | 193 | if match: |
|
194 | 194 | # if it's at toplevel, it's already the best one |
|
195 | 195 | if line[0] == 'c': |
|
196 | 196 | return lines, i |
|
197 | 197 | # else add whitespace to candidate list |
|
198 | 198 | candidates.append((match.group(1), i)) |
|
199 | 199 | if candidates: |
|
200 | 200 | # this will sort by whitespace, and by line number, |
|
201 | 201 | # less whitespace first |
|
202 | 202 | candidates.sort() |
|
203 | 203 | return lines, candidates[0][1] |
|
204 | 204 | else: |
|
205 | 205 | raise IOError('could not find class definition') |
|
206 | 206 | |
|
207 | 207 | if ismethod(object): |
|
208 | 208 | object = object.__func__ |
|
209 | 209 | if isfunction(object): |
|
210 | 210 | object = object.__code__ |
|
211 | 211 | if istraceback(object): |
|
212 | 212 | object = object.tb_frame |
|
213 | 213 | if isframe(object): |
|
214 | 214 | object = object.f_code |
|
215 | 215 | if iscode(object): |
|
216 | 216 | if not hasattr(object, 'co_firstlineno'): |
|
217 | 217 | raise IOError('could not find function definition') |
|
218 | 218 | pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)') |
|
219 | 219 | pmatch = pat.match |
|
220 | 220 | # fperez - fix: sometimes, co_firstlineno can give a number larger than |
|
221 | 221 | # the length of lines, which causes an error. Safeguard against that. |
|
222 | 222 | lnum = min(object.co_firstlineno, len(lines)) - 1 |
|
223 | 223 | while lnum > 0: |
|
224 | 224 | if pmatch(lines[lnum]): |
|
225 | 225 | break |
|
226 | 226 | lnum -= 1 |
|
227 | 227 | |
|
228 | 228 | return lines, lnum |
|
229 | 229 | raise IOError('could not find code object') |
|
230 | 230 | |
|
231 | 231 | |
|
232 | 232 | # This is a patched version of inspect.getargs that applies the (unmerged) |
|
233 | 233 | # patch for http://bugs.python.org/issue14611 by Stefano Taschini. This fixes |
|
234 | 234 | # https://github.com/ipython/ipython/issues/8205 and |
|
235 | 235 | # https://github.com/ipython/ipython/issues/8293 |
|
236 | 236 | def getargs(co): |
|
237 | 237 | """Get information about the arguments accepted by a code object. |
|
238 | 238 | |
|
239 | 239 | Three things are returned: (args, varargs, varkw), where 'args' is |
|
240 | 240 | a list of argument names (possibly containing nested lists), and |
|
241 | 241 | 'varargs' and 'varkw' are the names of the * and ** arguments or None.""" |
|
242 | 242 | if not iscode(co): |
|
243 | 243 | raise TypeError('{!r} is not a code object'.format(co)) |
|
244 | 244 | |
|
245 | 245 | nargs = co.co_argcount |
|
246 | 246 | names = co.co_varnames |
|
247 | 247 | args = list(names[:nargs]) |
|
248 | 248 | step = 0 |
|
249 | 249 | |
|
250 | 250 | # The following acrobatics are for anonymous (tuple) arguments. |
|
251 | 251 | for i in range(nargs): |
|
252 | 252 | if args[i][:1] in ('', '.'): |
|
253 | 253 | stack, remain, count = [], [], [] |
|
254 | 254 | while step < len(co.co_code): |
|
255 | 255 | op = ord(co.co_code[step]) |
|
256 | 256 | step = step + 1 |
|
257 | 257 | if op >= dis.HAVE_ARGUMENT: |
|
258 | 258 | opname = dis.opname[op] |
|
259 | 259 | value = ord(co.co_code[step]) + ord(co.co_code[step+1])*256 |
|
260 | 260 | step = step + 2 |
|
261 | 261 | if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'): |
|
262 | 262 | remain.append(value) |
|
263 | 263 | count.append(value) |
|
264 | 264 | elif opname in ('STORE_FAST', 'STORE_DEREF'): |
|
265 | 265 | if op in dis.haslocal: |
|
266 | 266 | stack.append(co.co_varnames[value]) |
|
267 | 267 | elif op in dis.hasfree: |
|
268 | 268 | stack.append((co.co_cellvars + co.co_freevars)[value]) |
|
269 | 269 | # Special case for sublists of length 1: def foo((bar)) |
|
270 | 270 | # doesn't generate the UNPACK_TUPLE bytecode, so if |
|
271 | 271 | # `remain` is empty here, we have such a sublist. |
|
272 | 272 | if not remain: |
|
273 | 273 | stack[0] = [stack[0]] |
|
274 | 274 | break |
|
275 | 275 | else: |
|
276 | 276 | remain[-1] = remain[-1] - 1 |
|
277 | 277 | while remain[-1] == 0: |
|
278 | 278 | remain.pop() |
|
279 | 279 | size = count.pop() |
|
280 | 280 | stack[-size:] = [stack[-size:]] |
|
281 | 281 | if not remain: |
|
282 | 282 | break |
|
283 | 283 | remain[-1] = remain[-1] - 1 |
|
284 | 284 | if not remain: |
|
285 | 285 | break |
|
286 | 286 | args[i] = stack[0] |
|
287 | 287 | |
|
288 | 288 | varargs = None |
|
289 | 289 | if co.co_flags & inspect.CO_VARARGS: |
|
290 | 290 | varargs = co.co_varnames[nargs] |
|
291 | 291 | nargs = nargs + 1 |
|
292 | 292 | varkw = None |
|
293 | 293 | if co.co_flags & inspect.CO_VARKEYWORDS: |
|
294 | 294 | varkw = co.co_varnames[nargs] |
|
295 | 295 | return inspect.Arguments(args, varargs, varkw) |
|
296 | 296 | |
|
297 | 297 | |
|
298 | 298 | # Monkeypatch inspect to apply our bugfix. |
|
299 | 299 | def with_patch_inspect(f): |
|
300 | 300 | """ |
|
301 | 301 | Deprecated since IPython 6.0 |
|
302 | 302 | decorator for monkeypatching inspect.findsource |
|
303 | 303 | """ |
|
304 | 304 | |
|
305 | 305 | def wrapped(*args, **kwargs): |
|
306 | 306 | save_findsource = inspect.findsource |
|
307 | 307 | save_getargs = inspect.getargs |
|
308 | 308 | inspect.findsource = findsource |
|
309 | 309 | inspect.getargs = getargs |
|
310 | 310 | try: |
|
311 | 311 | return f(*args, **kwargs) |
|
312 | 312 | finally: |
|
313 | 313 | inspect.findsource = save_findsource |
|
314 | 314 | inspect.getargs = save_getargs |
|
315 | 315 | |
|
316 | 316 | return wrapped |
|
317 | 317 | |
|
318 | 318 | |
|
319 | 319 | def fix_frame_records_filenames(records): |
|
320 | 320 | """Try to fix the filenames in each record from inspect.getinnerframes(). |
|
321 | 321 | |
|
322 | 322 | Particularly, modules loaded from within zip files have useless filenames |
|
323 | 323 | attached to their code object, and inspect.getinnerframes() just uses it. |
|
324 | 324 | """ |
|
325 | 325 | fixed_records = [] |
|
326 | 326 | for frame, filename, line_no, func_name, lines, index in records: |
|
327 | 327 | # Look inside the frame's globals dictionary for __file__, |
|
328 | 328 | # which should be better. However, keep Cython filenames since |
|
329 | 329 | # we prefer the source filenames over the compiled .so file. |
|
330 | 330 | if not filename.endswith(('.pyx', '.pxd', '.pxi')): |
|
331 | 331 | better_fn = frame.f_globals.get('__file__', None) |
|
332 | 332 | if isinstance(better_fn, str): |
|
333 | 333 | # Check the type just in case someone did something weird with |
|
334 | 334 | # __file__. It might also be None if the error occurred during |
|
335 | 335 | # import. |
|
336 | 336 | filename = better_fn |
|
337 | 337 | fixed_records.append((frame, filename, line_no, func_name, lines, index)) |
|
338 | 338 | return fixed_records |
|
339 | 339 | |
|
340 | 340 | |
|
341 | 341 | @with_patch_inspect |
|
342 | 342 | def _fixed_getinnerframes(etb, context=1, tb_offset=0): |
|
343 | 343 | LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5 |
|
344 | 344 | |
|
345 | 345 | records = fix_frame_records_filenames(inspect.getinnerframes(etb, context)) |
|
346 | 346 | # If the error is at the console, don't build any context, since it would |
|
347 | 347 | # otherwise produce 5 blank lines printed out (there is no file at the |
|
348 | 348 | # console) |
|
349 | 349 | rec_check = records[tb_offset:] |
|
350 | 350 | try: |
|
351 | 351 | rname = rec_check[0][1] |
|
352 | 352 | if rname == '<ipython console>' or rname.endswith('<string>'): |
|
353 | 353 | return rec_check |
|
354 | 354 | except IndexError: |
|
355 | 355 | pass |
|
356 | 356 | |
|
357 | 357 | aux = traceback.extract_tb(etb) |
|
358 | 358 | assert len(records) == len(aux) |
|
359 | 359 | for i, (file, lnum, _, _) in enumerate(aux): |
|
360 | 360 | maybeStart = lnum - 1 - context // 2 |
|
361 | 361 | start = max(maybeStart, 0) |
|
362 | 362 | end = start + context |
|
363 | 363 | lines = linecache.getlines(file)[start:end] |
|
364 | 364 | buf = list(records[i]) |
|
365 | 365 | buf[LNUM_POS] = lnum |
|
366 | 366 | buf[INDEX_POS] = lnum - 1 - start |
|
367 | 367 | buf[LINES_POS] = lines |
|
368 | 368 | records[i] = tuple(buf) |
|
369 | 369 | return records[tb_offset:] |
|
370 | 370 | |
|
371 | 371 | # Helper function -- largely belongs to VerboseTB, but we need the same |
|
372 | 372 | # functionality to produce a pseudo verbose TB for SyntaxErrors, so that they |
|
373 | 373 | # can be recognized properly by ipython.el's py-traceback-line-re |
|
374 | 374 | # (SyntaxErrors have to be treated specially because they have no traceback) |
|
375 | 375 | |
|
376 | 376 | |
|
377 | 377 | def _format_traceback_lines(lnum, index, lines, Colors, lvals=None, _line_format=(lambda x,_:x,None)): |
|
378 | 378 | numbers_width = INDENT_SIZE - 1 |
|
379 | 379 | res = [] |
|
380 | 380 | i = lnum - index |
|
381 | 381 | |
|
382 | 382 | for line in lines: |
|
383 | 383 | line = py3compat.cast_unicode(line) |
|
384 | 384 | |
|
385 | 385 | new_line, err = _line_format(line, 'str') |
|
386 | 386 | if not err: line = new_line |
|
387 | 387 | |
|
388 | 388 | if i == lnum: |
|
389 | 389 | # This is the line with the error |
|
390 | 390 | pad = numbers_width - len(str(i)) |
|
391 | 391 | num = '%s%s' % (debugger.make_arrow(pad), str(lnum)) |
|
392 | 392 | line = '%s%s%s %s%s' % (Colors.linenoEm, num, |
|
393 | 393 | Colors.line, line, Colors.Normal) |
|
394 | 394 | else: |
|
395 | 395 | num = '%*s' % (numbers_width, i) |
|
396 | 396 | line = '%s%s%s %s' % (Colors.lineno, num, |
|
397 | 397 | Colors.Normal, line) |
|
398 | 398 | |
|
399 | 399 | res.append(line) |
|
400 | 400 | if lvals and i == lnum: |
|
401 | 401 | res.append(lvals + '\n') |
|
402 | 402 | i = i + 1 |
|
403 | 403 | return res |
|
404 | 404 | |
|
405 | 405 | def is_recursion_error(etype, value, records): |
|
406 | 406 | try: |
|
407 | 407 | # RecursionError is new in Python 3.5 |
|
408 | 408 | recursion_error_type = RecursionError |
|
409 | 409 | except NameError: |
|
410 | 410 | recursion_error_type = RuntimeError |
|
411 | 411 | |
|
412 | 412 | # The default recursion limit is 1000, but some of that will be taken up |
|
413 | 413 | # by stack frames in IPython itself. >500 frames probably indicates |
|
414 | 414 | # a recursion error. |
|
415 | 415 | return (etype is recursion_error_type) \ |
|
416 | 416 | and "recursion" in str(value).lower() \ |
|
417 | 417 | and len(records) > 500 |
|
418 | 418 | |
|
419 | 419 | def find_recursion(etype, value, records): |
|
420 | 420 | """Identify the repeating stack frames from a RecursionError traceback |
|
421 | 421 | |
|
422 | 422 | 'records' is a list as returned by VerboseTB.get_records() |
|
423 | 423 | |
|
424 | 424 | Returns (last_unique, repeat_length) |
|
425 | 425 | """ |
|
426 | 426 | # This involves a bit of guesswork - we want to show enough of the traceback |
|
427 | 427 | # to indicate where the recursion is occurring. We guess that the innermost |
|
428 | 428 | # quarter of the traceback (250 frames by default) is repeats, and find the |
|
429 | 429 | # first frame (from in to out) that looks different. |
|
430 | 430 | if not is_recursion_error(etype, value, records): |
|
431 | 431 | return len(records), 0 |
|
432 | 432 | |
|
433 | 433 | # Select filename, lineno, func_name to track frames with |
|
434 | 434 | records = [r[1:4] for r in records] |
|
435 | 435 | inner_frames = records[-(len(records)//4):] |
|
436 | 436 | frames_repeated = set(inner_frames) |
|
437 | 437 | |
|
438 | 438 | last_seen_at = {} |
|
439 | 439 | longest_repeat = 0 |
|
440 | 440 | i = len(records) |
|
441 | 441 | for frame in reversed(records): |
|
442 | 442 | i -= 1 |
|
443 | 443 | if frame not in frames_repeated: |
|
444 | 444 | last_unique = i |
|
445 | 445 | break |
|
446 | 446 | |
|
447 | 447 | if frame in last_seen_at: |
|
448 | 448 | distance = last_seen_at[frame] - i |
|
449 | 449 | longest_repeat = max(longest_repeat, distance) |
|
450 | 450 | |
|
451 | 451 | last_seen_at[frame] = i |
|
452 | 452 | else: |
|
453 | 453 | last_unique = 0 # The whole traceback was recursion |
|
454 | 454 | |
|
455 | 455 | return last_unique, longest_repeat |
|
456 | 456 | |
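A minimal sketch of the heuristic above (synthetic data, not part of this changeset): frame records are modelled as (frame, filename, lineno, func_name, lines, index) tuples, and a long run of identical innermost frames collapses to a single repeated entry.

from IPython.core.ultratb import find_recursion

# Sketch only: one 'main' frame followed by 600 identical 'recurse' frames.
records = [(None, "prog.py", 10, "main", None, None)]
records += [(None, "prog.py", 3, "recurse", None, None)] * 600

err = RecursionError("maximum recursion depth exceeded")
last_unique, repeat_length = find_recursion(RecursionError, err, records)
# last_unique == 0 (the 'main' frame), repeat_length == 1, so VerboseTB
# prints the repeating frame once plus a '... frames repeated ...' marker.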
|
457 | 457 | #--------------------------------------------------------------------------- |
|
458 | 458 | # Module classes |
|
459 | 459 | class TBTools(colorable.Colorable): |
|
460 | 460 | """Basic tools used by all traceback printer classes.""" |
|
461 | 461 | |
|
462 | 462 | # Number of frames to skip when reporting tracebacks |
|
463 | 463 | tb_offset = 0 |
|
464 | 464 | |
|
465 | 465 | def __init__(self, color_scheme='NoColor', call_pdb=False, ostream=None, parent=None, config=None): |
|
466 | 466 | # Whether to call the interactive pdb debugger after printing |
|
467 | 467 | # tracebacks or not |
|
468 | 468 | super(TBTools, self).__init__(parent=parent, config=config) |
|
469 | 469 | self.call_pdb = call_pdb |
|
470 | 470 | |
|
471 | 471 | # Output stream to write to. Note that we store the original value in |
|
472 | 472 | # a private attribute and then make the public ostream a property, so |
|
473 | 473 | # that we can delay accessing sys.stdout until runtime. The way |
|
474 | 474 | # things are written now, the sys.stdout object is dynamically managed |
|
475 | 475 | # so a reference to it should NEVER be stored statically. This |
|
476 | 476 | # property approach confines this detail to a single location, and all |
|
477 | 477 | # subclasses can simply access self.ostream for writing. |
|
478 | 478 | self._ostream = ostream |
|
479 | 479 | |
|
480 | 480 | # Create color table |
|
481 | 481 | self.color_scheme_table = exception_colors() |
|
482 | 482 | |
|
483 | 483 | self.set_colors(color_scheme) |
|
484 | 484 | self.old_scheme = color_scheme # save initial value for toggles |
|
485 | 485 | |
|
486 | 486 | if call_pdb: |
|
487 | 487 | self.pdb = debugger.Pdb() |
|
488 | 488 | else: |
|
489 | 489 | self.pdb = None |
|
490 | 490 | |
|
491 | 491 | def _get_ostream(self): |
|
492 | 492 | """Output stream that exceptions are written to. |
|
493 | 493 | |
|
494 | 494 | Valid values are: |
|
495 | 495 | |
|
496 | 496 | - None: the default, which means that IPython will dynamically resolve |
|
497 | 497 | to sys.stdout. This ensures compatibility with most tools, including |
|
498 | 498 | Windows (where plain stdout doesn't recognize ANSI escapes). |
|
499 | 499 | |
|
500 | 500 | - Any object with 'write' and 'flush' attributes. |
|
501 | 501 | """ |
|
502 | 502 | return sys.stdout if self._ostream is None else self._ostream |
|
503 | 503 | |
|
504 | 504 | def _set_ostream(self, val): |
|
505 | 505 | assert val is None or (hasattr(val, 'write') and hasattr(val, 'flush')) |
|
506 | 506 | self._ostream = val |
|
507 | 507 | |
|
508 | 508 | ostream = property(_get_ostream, _set_ostream) |
|
509 | 509 | |
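A consequence of the ostream property above, shown as a usage sketch (not part of this changeset): any object exposing write and flush can receive the formatted traceback, for example an io.StringIO buffer instead of sys.stdout, here used with the ListTB subclass defined further down.

import io
import sys
import traceback

from IPython.core.ultratb import ListTB

# Sketch only: capture a plain-text traceback in a buffer via the
# write/flush contract documented above.
buf = io.StringIO()
printer = ListTB(color_scheme='NoColor', ostream=buf)

try:
    1 / 0
except ZeroDivisionError:
    etype, evalue, tb = sys.exc_info()
    printer(etype, evalue, traceback.extract_tb(tb))

print(buf.getvalue())   # the traceback text, without ANSI escapes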
|
510 | 510 | def set_colors(self, *args, **kw): |
|
511 | 511 | """Shorthand access to the color table scheme selector method.""" |
|
512 | 512 | |
|
513 | 513 | # Set own color table |
|
514 | 514 | self.color_scheme_table.set_active_scheme(*args, **kw) |
|
515 | 515 | # for convenience, set Colors to the active scheme |
|
516 | 516 | self.Colors = self.color_scheme_table.active_colors |
|
517 | 517 | # Also set colors of debugger |
|
518 | 518 | if hasattr(self, 'pdb') and self.pdb is not None: |
|
519 | 519 | self.pdb.set_colors(*args, **kw) |
|
520 | 520 | |
|
521 | 521 | def color_toggle(self): |
|
522 | 522 | """Toggle between the currently active color scheme and NoColor.""" |
|
523 | 523 | |
|
524 | 524 | if self.color_scheme_table.active_scheme_name == 'NoColor': |
|
525 | 525 | self.color_scheme_table.set_active_scheme(self.old_scheme) |
|
526 | 526 | self.Colors = self.color_scheme_table.active_colors |
|
527 | 527 | else: |
|
528 | 528 | self.old_scheme = self.color_scheme_table.active_scheme_name |
|
529 | 529 | self.color_scheme_table.set_active_scheme('NoColor') |
|
530 | 530 | self.Colors = self.color_scheme_table.active_colors |
|
531 | 531 | |
|
532 | 532 | def stb2text(self, stb): |
|
533 | 533 | """Convert a structured traceback (a list) to a string.""" |
|
534 | 534 | return '\n'.join(stb) |
|
535 | 535 | |
|
536 | 536 | def text(self, etype, value, tb, tb_offset=None, context=5): |
|
537 | 537 | """Return formatted traceback. |
|
538 | 538 | |
|
539 | 539 | Subclasses may override this if they add extra arguments. |
|
540 | 540 | """ |
|
541 | 541 | tb_list = self.structured_traceback(etype, value, tb, |
|
542 | 542 | tb_offset, context) |
|
543 | 543 | return self.stb2text(tb_list) |
|
544 | 544 | |
|
545 | 545 | def structured_traceback(self, etype, evalue, tb, tb_offset=None, |
|
546 | 546 | context=5, mode=None): |
|
547 | 547 | """Return a list of traceback frames. |
|
548 | 548 | |
|
549 | 549 | Must be implemented by each class. |
|
550 | 550 | """ |
|
551 | 551 | raise NotImplementedError() |
|
552 | 552 | |
|
553 | 553 | |
|
554 | 554 | #--------------------------------------------------------------------------- |
|
555 | 555 | class ListTB(TBTools): |
|
556 | 556 | """Print traceback information from a traceback list, with optional color. |
|
557 | 557 | |
|
558 | 558 | Calling requires 3 arguments: (etype, evalue, elist) |
|
559 | 559 | as would be obtained by:: |
|
560 | 560 | |
|
561 | 561 | etype, evalue, tb = sys.exc_info() |
|
562 | 562 | if tb: |
|
563 | 563 | elist = traceback.extract_tb(tb) |
|
564 | 564 | else: |
|
565 | 565 | elist = None |
|
566 | 566 | |
|
567 | 567 | It can thus be used by programs which need to process the traceback before |
|
568 | 568 | printing (such as console replacements based on the code module from the |
|
569 | 569 | standard library). |
|
570 | 570 | |
|
571 | 571 | Because they are meant to be called without a full traceback (only a |
|
572 | 572 | list), instances of this class can't call the interactive pdb debugger.""" |
|
573 | 573 | |
|
574 | 574 | def __init__(self, color_scheme='NoColor', call_pdb=False, ostream=None, parent=None, config=None): |
|
575 | 575 | TBTools.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb, |
|
576 | 576 | ostream=ostream, parent=parent,config=config) |
|
577 | 577 | |
|
578 | 578 | def __call__(self, etype, value, elist): |
|
579 | 579 | self.ostream.flush() |
|
580 | 580 | self.ostream.write(self.text(etype, value, elist)) |
|
581 | 581 | self.ostream.write('\n') |
|
582 | 582 | |
|
583 | 583 | def structured_traceback(self, etype, value, elist, tb_offset=None, |
|
584 | 584 | context=5): |
|
585 | 585 | """Return a color formatted string with the traceback info. |
|
586 | 586 | |
|
587 | 587 | Parameters |
|
588 | 588 | ---------- |
|
589 | 589 | etype : exception type |
|
590 | 590 | Type of the exception raised. |
|
591 | 591 | |
|
592 | 592 | value : object |
|
593 | 593 | Data stored in the exception |
|
594 | 594 | |
|
595 | 595 | elist : list |
|
596 | 596 | List of frames, see class docstring for details. |
|
597 | 597 | |
|
598 | 598 | tb_offset : int, optional |
|
599 | 599 | Number of frames in the traceback to skip. If not given, the |
|
600 | 600 | instance value is used (set in constructor). |
|
601 | 601 | |
|
602 | 602 | context : int, optional |
|
603 | 603 | Number of lines of context information to print. |
|
604 | 604 | |
|
605 | 605 | Returns |
|
606 | 606 | ------- |
|
607 | 607 | String with formatted exception. |
|
608 | 608 | """ |
|
609 | 609 | tb_offset = self.tb_offset if tb_offset is None else tb_offset |
|
610 | 610 | Colors = self.Colors |
|
611 | 611 | out_list = [] |
|
612 | 612 | if elist: |
|
613 | 613 | |
|
614 | 614 | if tb_offset and len(elist) > tb_offset: |
|
615 | 615 | elist = elist[tb_offset:] |
|
616 | 616 | |
|
617 | 617 | out_list.append('Traceback %s(most recent call last)%s:' % |
|
618 | 618 | (Colors.normalEm, Colors.Normal) + '\n') |
|
619 | 619 | out_list.extend(self._format_list(elist)) |
|
620 | 620 | # The exception info should be a single entry in the list. |
|
621 | 621 | lines = ''.join(self._format_exception_only(etype, value)) |
|
622 | 622 | out_list.append(lines) |
|
623 | 623 | |
|
624 | 624 | # Note: this code originally read: |
|
625 | 625 | |
|
626 | 626 | ## for line in lines[:-1]: |
|
627 | 627 | ## out_list.append(" "+line) |
|
628 | 628 | ## out_list.append(lines[-1]) |
|
629 | 629 | |
|
630 | 630 | # This means it was indenting everything but the last line by a little |
|
631 | 631 | # bit. I've disabled this for now, but if we see ugliness somewhere we |
|
632 | 632 | # can restore it. |
|
633 | 633 | |
|
634 | 634 | return out_list |
|
635 | 635 | |
|
636 | 636 | def _format_list(self, extracted_list): |
|
637 | 637 | """Format a list of traceback entry tuples for printing. |
|
638 | 638 | |
|
639 | 639 | Given a list of tuples as returned by extract_tb() or |
|
640 | 640 | extract_stack(), return a list of strings ready for printing. |
|
641 | 641 | Each string in the resulting list corresponds to the item with the |
|
642 | 642 | same index in the argument list. Each string ends in a newline; |
|
643 | 643 | the strings may contain internal newlines as well, for those items |
|
644 | 644 | whose source text line is not None. |
|
645 | 645 | |
|
646 | 646 | Lifted almost verbatim from traceback.py |
|
647 | 647 | """ |
|
648 | 648 | |
|
649 | 649 | Colors = self.Colors |
|
650 | 650 | list = [] |
|
651 | 651 | for filename, lineno, name, line in extracted_list[:-1]: |
|
652 | 652 | item = ' File %s"%s"%s, line %s%d%s, in %s%s%s\n' % \ |
|
653 | 653 | (Colors.filename, filename, Colors.Normal, |
|
654 | 654 | Colors.lineno, lineno, Colors.Normal, |
|
655 | 655 | Colors.name, name, Colors.Normal) |
|
656 | 656 | if line: |
|
657 | 657 | item += ' %s\n' % line.strip() |
|
658 | 658 | list.append(item) |
|
659 | 659 | # Emphasize the last entry |
|
660 | 660 | filename, lineno, name, line = extracted_list[-1] |
|
661 | 661 | item = '%s File %s"%s"%s, line %s%d%s, in %s%s%s%s\n' % \ |
|
662 | 662 | (Colors.normalEm, |
|
663 | 663 | Colors.filenameEm, filename, Colors.normalEm, |
|
664 | 664 | Colors.linenoEm, lineno, Colors.normalEm, |
|
665 | 665 | Colors.nameEm, name, Colors.normalEm, |
|
666 | 666 | Colors.Normal) |
|
667 | 667 | if line: |
|
668 | 668 | item += '%s %s%s\n' % (Colors.line, line.strip(), |
|
669 | 669 | Colors.Normal) |
|
670 | 670 | list.append(item) |
|
671 | 671 | return list |
|
672 | 672 | |
|
673 | 673 | def _format_exception_only(self, etype, value): |
|
674 | 674 | """Format the exception part of a traceback. |
|
675 | 675 | |
|
676 | 676 | The arguments are the exception type and value such as given by |
|
677 | 677 | sys.exc_info()[:2]. The return value is a list of strings, each ending |
|
678 | 678 | in a newline. Normally, the list contains a single string; however, |
|
679 | 679 | for SyntaxError exceptions, it contains several lines that (when |
|
680 | 680 | printed) display detailed information about where the syntax error |
|
681 | 681 | occurred. The message indicating which exception occurred is

682 | 682 | always the last string in the list.
|
683 | 683 | |
|
684 | 684 | Also lifted nearly verbatim from traceback.py |
|
685 | 685 | """ |
|
686 | 686 | have_filedata = False |
|
687 | 687 | Colors = self.Colors |
|
688 | 688 | list = [] |
|
689 | 689 | stype = py3compat.cast_unicode(Colors.excName + etype.__name__ + Colors.Normal) |
|
690 | 690 | if value is None: |
|
691 | 691 | # Not sure if this can still happen in Python 2.6 and above |
|
692 | 692 | list.append(stype + '\n') |
|
693 | 693 | else: |
|
694 | 694 | if issubclass(etype, SyntaxError): |
|
695 | 695 | have_filedata = True |
|
696 | 696 | if not value.filename: value.filename = "<string>" |
|
697 | 697 | if value.lineno: |
|
698 | 698 | lineno = value.lineno |
|
699 | 699 | textline = linecache.getline(value.filename, value.lineno) |
|
700 | 700 | else: |
|
701 | 701 | lineno = 'unknown' |
|
702 | 702 | textline = '' |
|
703 | 703 | list.append('%s File %s"%s"%s, line %s%s%s\n' % \ |
|
704 | 704 | (Colors.normalEm, |
|
705 | 705 | Colors.filenameEm, py3compat.cast_unicode(value.filename), Colors.normalEm, |
|
706 | 706 | Colors.linenoEm, lineno, Colors.Normal )) |
|
707 | 707 | if textline == '': |
|
708 | 708 | textline = py3compat.cast_unicode(value.text, "utf-8") |
|
709 | 709 | |
|
710 | 710 | if textline is not None: |
|
711 | 711 | i = 0 |
|
712 | 712 | while i < len(textline) and textline[i].isspace(): |
|
713 | 713 | i += 1 |
|
714 | 714 | list.append('%s %s%s\n' % (Colors.line, |
|
715 | 715 | textline.strip(), |
|
716 | 716 | Colors.Normal)) |
|
717 | 717 | if value.offset is not None: |
|
718 | 718 | s = ' ' |
|
719 | 719 | for c in textline[i:value.offset - 1]: |
|
720 | 720 | if c.isspace(): |
|
721 | 721 | s += c |
|
722 | 722 | else: |
|
723 | 723 | s += ' ' |
|
724 | 724 | list.append('%s%s^%s\n' % (Colors.caret, s, |
|
725 | 725 | Colors.Normal)) |
|
726 | 726 | |
|
727 | 727 | try: |
|
728 | 728 | s = value.msg |
|
729 | 729 | except Exception: |
|
730 | 730 | s = self._some_str(value) |
|
731 | 731 | if s: |
|
732 | 732 | list.append('%s%s:%s %s\n' % (stype, Colors.excName, |
|
733 | 733 | Colors.Normal, s)) |
|
734 | 734 | else: |
|
735 | 735 | list.append('%s\n' % stype) |
|
736 | 736 | |
|
737 | 737 | # sync with user hooks |
|
738 | 738 | if have_filedata: |
|
739 | 739 | ipinst = get_ipython() |
|
740 | 740 | if ipinst is not None: |
|
741 | 741 | ipinst.hooks.synchronize_with_editor(value.filename, value.lineno, 0) |
|
742 | 742 | |
|
743 | 743 | return list |
|
744 | 744 | |
|
745 | 745 | def get_exception_only(self, etype, value): |
|
746 | 746 | """Only print the exception type and message, without a traceback. |
|
747 | 747 | |
|
748 | 748 | Parameters |
|
749 | 749 | ---------- |
|
750 | 750 | etype : exception type |
|
751 | 751 | value : exception value |
|
752 | 752 | """ |
|
753 | 753 | return ListTB.structured_traceback(self, etype, value, []) |
|
754 | 754 | |
|
755 | 755 | def show_exception_only(self, etype, evalue): |
|
756 | 756 | """Only print the exception type and message, without a traceback. |
|
757 | 757 | |
|
758 | 758 | Parameters |
|
759 | 759 | ---------- |
|
760 | 760 | etype : exception type |
|
761 | 761 | value : exception value |
|
762 | 762 | """ |
|
763 | 763 | # This method needs to use __call__ from *this* class, not the one from |
|
764 | 764 | # a subclass whose signature or behavior may be different |
|
765 | 765 | ostream = self.ostream |
|
766 | 766 | ostream.flush() |
|
767 | 767 | ostream.write('\n'.join(self.get_exception_only(etype, evalue))) |
|
768 | 768 | ostream.flush() |
|
769 | 769 | |
|
770 | 770 | def _some_str(self, value): |
|
771 | 771 | # Lifted from traceback.py |
|
772 | 772 | try: |
|
773 | 773 | return py3compat.cast_unicode(str(value)) |
|
774 | 774 | except: |
|
775 | 775 | return u'<unprintable %s object>' % type(value).__name__ |
|
776 | 776 | |
|
777 | 777 | |
|
778 | 778 | #---------------------------------------------------------------------------- |
|
779 | 779 | class VerboseTB(TBTools): |
|
780 | 780 | """A port of Ka-Ping Yee's cgitb.py module that outputs color text instead |
|
781 | 781 | of HTML. Requires inspect and pydoc. Crazy, man. |
|
782 | 782 | |
|
783 | 783 | Modified version which optionally strips the topmost entries from the |
|
784 | 784 | traceback, to be used with alternate interpreters (because their own code |
|
785 | 785 | would appear in the traceback).""" |
|
786 | 786 | |
|
787 | 787 | def __init__(self, color_scheme='Linux', call_pdb=False, ostream=None, |
|
788 | 788 | tb_offset=0, long_header=False, include_vars=True, |
|
789 | 789 | check_cache=None, debugger_cls = None, |
|
790 | 790 | parent=None, config=None): |
|
791 | 791 | """Specify traceback offset, headers and color scheme. |
|
792 | 792 | |
|
793 | 793 | Define how many frames to drop from the tracebacks. Calling it with |
|
794 | 794 | tb_offset=1 allows use of this handler in interpreters which will have |
|
795 | 795 | their own code at the top of the traceback (VerboseTB will first |
|
796 | 796 | remove that frame before printing the traceback info).""" |
|
797 | 797 | TBTools.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb, |
|
798 | 798 | ostream=ostream, parent=parent, config=config) |
|
799 | 799 | self.tb_offset = tb_offset |
|
800 | 800 | self.long_header = long_header |
|
801 | 801 | self.include_vars = include_vars |
|
802 | 802 | # By default we use linecache.checkcache, but the user can provide a |
|
803 | 803 | # different check_cache implementation. This is used by the IPython |
|
804 | 804 | # kernel to provide tracebacks for interactive code that is cached, |
|
805 | 805 | # by a compiler instance that flushes the linecache but preserves its |
|
806 | 806 | # own code cache. |
|
807 | 807 | if check_cache is None: |
|
808 | 808 | check_cache = linecache.checkcache |
|
809 | 809 | self.check_cache = check_cache |
|
810 | 810 | |
|
811 | 811 | self.debugger_cls = debugger_cls or debugger.Pdb |
|
812 | 812 | |
|
813 | 813 | def format_records(self, records, last_unique, recursion_repeat): |
|
814 | 814 | """Format the stack frames of the traceback""" |
|
815 | 815 | frames = [] |
|
816 | 816 | for r in records[:last_unique+recursion_repeat+1]: |
|
817 | 817 | #print '*** record:',file,lnum,func,lines,index # dbg |
|
818 | 818 | frames.append(self.format_record(*r)) |
|
819 | 819 | |
|
820 | 820 | if recursion_repeat: |
|
821 | 821 | frames.append('... last %d frames repeated, from the frame below ...\n' % recursion_repeat) |
|
822 | 822 | frames.append(self.format_record(*records[last_unique+recursion_repeat+1])) |
|
823 | 823 | |
|
824 | 824 | return frames |
|
825 | 825 | |
|
826 | 826 | def format_record(self, frame, file, lnum, func, lines, index): |
|
827 | 827 | """Format a single stack frame""" |
|
828 | 828 | Colors = self.Colors # just a shorthand + quicker name lookup |
|
829 | 829 | ColorsNormal = Colors.Normal # used a lot |
|
830 | 830 | col_scheme = self.color_scheme_table.active_scheme_name |
|
831 | 831 | indent = ' ' * INDENT_SIZE |
|
832 | 832 | em_normal = '%s\n%s%s' % (Colors.valEm, indent, ColorsNormal) |
|
833 | 833 | undefined = '%sundefined%s' % (Colors.em, ColorsNormal) |
|
834 | 834 | tpl_link = '%s%%s%s' % (Colors.filenameEm, ColorsNormal) |
|
835 | 835 | tpl_call = 'in %s%%s%s%%s%s' % (Colors.vName, Colors.valEm, |
|
836 | 836 | ColorsNormal) |
|
837 | 837 | tpl_call_fail = 'in %s%%s%s(***failed resolving arguments***)%s' % \ |
|
838 | 838 | (Colors.vName, Colors.valEm, ColorsNormal) |
|
839 | 839 | tpl_local_var = '%s%%s%s' % (Colors.vName, ColorsNormal) |
|
840 | 840 | tpl_global_var = '%sglobal%s %s%%s%s' % (Colors.em, ColorsNormal, |
|
841 | 841 | Colors.vName, ColorsNormal) |
|
842 | 842 | tpl_name_val = '%%s %s= %%s%s' % (Colors.valEm, ColorsNormal) |
|
843 | 843 | |
|
844 | 844 | tpl_line = '%s%%s%s %%s' % (Colors.lineno, ColorsNormal) |
|
845 | 845 | tpl_line_em = '%s%%s%s %%s%s' % (Colors.linenoEm, Colors.line, |
|
846 | 846 | ColorsNormal) |
|
847 | 847 | |
|
848 | 848 | abspath = os.path.abspath |
|
849 | 849 | |
|
850 | 850 | |
|
851 | 851 | if not file: |
|
852 | 852 | file = '?' |
|
853 | 853 | elif file.startswith(str("<")) and file.endswith(str(">")): |
|
854 | 854 | # Not a real filename, no problem... |
|
855 | 855 | pass |
|
856 | 856 | elif not os.path.isabs(file): |
|
857 | 857 | # Try to make the filename absolute by trying all |
|
858 | 858 | # sys.path entries (which is also what linecache does) |
|
859 | 859 | for dirname in sys.path: |
|
860 | 860 | try: |
|
861 | 861 | fullname = os.path.join(dirname, file) |
|
862 | 862 | if os.path.isfile(fullname): |
|
863 | 863 | file = os.path.abspath(fullname) |
|
864 | 864 | break |
|
865 | 865 | except Exception: |
|
866 | 866 | # Just in case that sys.path contains very |
|
867 | 867 | # strange entries... |
|
868 | 868 | pass |
|
869 | 869 | |
|
870 | 870 | file = py3compat.cast_unicode(file, util_path.fs_encoding) |
|
871 | 871 | link = tpl_link % util_path.compress_user(file) |
|
872 | 872 | args, varargs, varkw, locals = inspect.getargvalues(frame) |
|
873 | 873 | |
|
874 | 874 | if func == '?': |
|
875 | 875 | call = '' |
|
876 | 876 | else: |
|
877 | 877 | # Decide whether to include variable details or not |
|
878 | 878 | var_repr = self.include_vars and eqrepr or nullrepr |
|
879 | 879 | try: |
|
880 | 880 | call = tpl_call % (func, inspect.formatargvalues(args, |
|
881 | 881 | varargs, varkw, |
|
882 | 882 | locals, formatvalue=var_repr)) |
|
883 | 883 | except KeyError: |
|
884 | 884 | # This happens in situations like errors inside generator |
|
885 | 885 | # expressions, where local variables are listed in the |
|
886 | 886 | # line, but can't be extracted from the frame. I'm not |
|
887 | 887 | # 100% sure this isn't actually a bug in inspect itself, |
|
888 | 888 | # but since there's no info for us to compute with, the |
|
889 | 889 | # best we can do is report the failure and move on. Here |
|
890 | 890 | # we must *not* call any traceback construction again, |
|
891 | 891 | # because that would mess up use of %debug later on. So we |
|
892 | 892 | # simply report the failure and move on. The only |
|
893 | 893 | # limitation will be that this frame won't have locals |
|
894 | 894 | # listed in the call signature. Quite subtle problem... |
|
895 | 895 | # I can't think of a good way to validate this in a unit |
|
896 | 896 | # test, but running a script consisting of: |
|
897 | 897 | # dict( (k,v.strip()) for (k,v) in range(10) ) |
|
898 | 898 | # will illustrate the error, if this exception catch is |
|
899 | 899 | # disabled. |
|
900 | 900 | call = tpl_call_fail % func |
|
901 | 901 | |
|
902 | 902 | # Don't attempt to tokenize binary files. |
|
903 | 903 | if file.endswith(('.so', '.pyd', '.dll')): |
|
904 | 904 | return '%s %s\n' % (link, call) |
|
905 | 905 | |
|
906 | 906 | elif file.endswith(('.pyc', '.pyo')): |
|
907 | 907 | # Look up the corresponding source file. |
|
908 | 908 | try: |
|
909 | 909 | file = openpy.source_from_cache(file) |
|
910 | 910 | except ValueError: |
|
911 | 911 | # Failed to get the source file for some reason |
|
912 | 912 | # E.g. https://github.com/ipython/ipython/issues/9486 |
|
913 | 913 | return '%s %s\n' % (link, call) |
|
914 | 914 | |
|
915 | 915 | def linereader(file=file, lnum=[lnum], getline=linecache.getline): |
|
916 | 916 | line = getline(file, lnum[0]) |
|
917 | 917 | lnum[0] += 1 |
|
918 | 918 | return line |
|
919 | 919 | |
|
920 | 920 | # Build the list of names on this line of code where the exception |
|
921 | 921 | # occurred. |
|
922 | 922 | try: |
|
923 | 923 | names = [] |
|
924 | 924 | name_cont = False |
|
925 | 925 | |
|
926 | 926 | for token_type, token, start, end, line in generate_tokens(linereader): |
|
927 | 927 | # build composite names |
|
928 | 928 | if token_type == tokenize.NAME and token not in keyword.kwlist: |
|
929 | 929 | if name_cont: |
|
930 | 930 | # Continuation of a dotted name |
|
931 | 931 | try: |
|
932 | 932 | names[-1].append(token) |
|
933 | 933 | except IndexError: |
|
934 | 934 | names.append([token]) |
|
935 | 935 | name_cont = False |
|
936 | 936 | else: |
|
937 | 937 | # Regular new names. We append everything, the caller |
|
938 | 938 | # will be responsible for pruning the list later. It's |
|
939 | 939 | # very tricky to try to prune as we go, b/c composite |
|
940 | 940 | # names can fool us. The pruning at the end is easy |
|
941 | 941 | # to do (or the caller can print a list with repeated |
|
942 | 942 | # names if so desired).
|
943 | 943 | names.append([token]) |
|
944 | 944 | elif token == '.': |
|
945 | 945 | name_cont = True |
|
946 | 946 | elif token_type == tokenize.NEWLINE: |
|
947 | 947 | break |
|
948 | 948 | |
|
949 | 949 | except (IndexError, UnicodeDecodeError, SyntaxError): |
|
950 | 950 | # signals exit of tokenizer |
|
951 | 951 | # SyntaxError can occur if the file is not actually Python |
|
952 | 952 | # - see gh-6300 |
|
953 | 953 | pass |
|
954 | 954 | except tokenize.TokenError as msg: |
|
955 | 955 | # Tokenizing may fail for various reasons, many of which are |
|
956 | 956 | # harmless. (A good example is when the line in question is the |
|
957 | 957 | # close of a triple-quoted string, cf gh-6864). We don't want to |
|
958 | 958 | # show this to users, but want to make it available for debugging
|
959 | 959 | # purposes. |
|
960 | 960 | _m = ("An unexpected error occurred while tokenizing input\n" |
|
961 | 961 | "The following traceback may be corrupted or invalid\n" |
|
962 | 962 | "The error message is: %s\n" % msg) |
|
963 | 963 | debug(_m) |
|
964 | 964 | |
|
965 | 965 | # Join composite names (e.g. "dict.fromkeys") |
|
966 | 966 | names = ['.'.join(n) for n in names] |
|
967 | 967 | # prune names list of duplicates, but keep the right order |
|
968 | 968 | unique_names = uniq_stable(names) |
|
969 | 969 | |
|
970 | 970 | # Start loop over vars |
|
971 | 971 | lvals = [] |
|
972 | 972 | if self.include_vars: |
|
973 | 973 | for name_full in unique_names: |
|
974 | 974 | name_base = name_full.split('.', 1)[0] |
|
975 | 975 | if name_base in frame.f_code.co_varnames: |
|
976 | 976 | if name_base in locals: |
|
977 | 977 | try: |
|
978 | 978 | value = repr(eval(name_full, locals)) |
|
979 | 979 | except: |
|
980 | 980 | value = undefined |
|
981 | 981 | else: |
|
982 | 982 | value = undefined |
|
983 | 983 | name = tpl_local_var % name_full |
|
984 | 984 | else: |
|
985 | 985 | if name_base in frame.f_globals: |
|
986 | 986 | try: |
|
987 | 987 | value = repr(eval(name_full, frame.f_globals)) |
|
988 | 988 | except: |
|
989 | 989 | value = undefined |
|
990 | 990 | else: |
|
991 | 991 | value = undefined |
|
992 | 992 | name = tpl_global_var % name_full |
|
993 | 993 | lvals.append(tpl_name_val % (name, value)) |
|
994 | 994 | if lvals: |
|
995 | 995 | lvals = '%s%s' % (indent, em_normal.join(lvals)) |
|
996 | 996 | else: |
|
997 | 997 | lvals = '' |
|
998 | 998 | |
|
999 | 999 | level = '%s %s\n' % (link, call) |
|
1000 | 1000 | |
|
1001 | 1001 | if index is None: |
|
1002 | 1002 | return level |
|
1003 | 1003 | else: |
|
1004 | 1004 | _line_format = PyColorize.Parser(style=col_scheme, parent=self).format2 |
|
1005 | 1005 | return '%s%s' % (level, ''.join( |
|
1006 | 1006 | _format_traceback_lines(lnum, index, lines, Colors, lvals, |
|
1007 | 1007 | _line_format))) |
|
1008 | 1008 | |
|
1009 | 1009 | def prepare_chained_exception_message(self, cause): |
|
1010 | 1010 | direct_cause = "\nThe above exception was the direct cause of the following exception:\n" |
|
1011 | 1011 | exception_during_handling = "\nDuring handling of the above exception, another exception occurred:\n" |
|
1012 | 1012 | |
|
1013 | 1013 | if cause: |
|
1014 | 1014 | message = [[direct_cause]] |
|
1015 | 1015 | else: |
|
1016 | 1016 | message = [[exception_during_handling]] |
|
1017 | 1017 | return message |
|
1018 | 1018 | |
|
1019 | 1019 | def prepare_header(self, etype, long_version=False): |
|
1020 | 1020 | colors = self.Colors # just a shorthand + quicker name lookup |
|
1021 | 1021 | colorsnormal = colors.Normal # used a lot |
|
1022 | 1022 | exc = '%s%s%s' % (colors.excName, etype, colorsnormal) |
|
1023 | 1023 | width = min(75, get_terminal_size()[0]) |
|
1024 | 1024 | if long_version: |
|
1025 | 1025 | # Header with the exception type, python version, and date |
|
1026 | 1026 | pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable |
|
1027 | 1027 | date = time.ctime(time.time()) |
|
1028 | 1028 | |
|
1029 | 1029 | head = '%s%s%s\n%s%s%s\n%s' % (colors.topline, '-' * width, colorsnormal, |
|
1030 | 1030 | exc, ' ' * (width - len(str(etype)) - len(pyver)), |
|
1031 | 1031 | pyver, date.rjust(width) ) |
|
1032 | 1032 | head += "\nA problem occurred executing Python code. Here is the sequence of function" \ |
|
1033 | 1033 | "\ncalls leading up to the error, with the most recent (innermost) call last." |
|
1034 | 1034 | else: |
|
1035 | 1035 | # Simplified header |
|
1036 | 1036 | head = '%s%s' % (exc, 'Traceback (most recent call last)'. \ |
|
1037 | 1037 | rjust(width - len(str(etype))) ) |
|
1038 | 1038 | |
|
1039 | 1039 | return head |
|
1040 | 1040 | |
|
1041 | 1041 | def format_exception(self, etype, evalue): |
|
1042 | 1042 | colors = self.Colors # just a shorthand + quicker name lookup |
|
1043 | 1043 | colorsnormal = colors.Normal # used a lot |
|
1044 | 1044 | indent = ' ' * INDENT_SIZE |
|
1045 | 1045 | # Get (safely) a string form of the exception info |
|
1046 | 1046 | try: |
|
1047 | 1047 | etype_str, evalue_str = map(str, (etype, evalue)) |
|
1048 | 1048 | except: |
|
1049 | 1049 | # User exception is improperly defined. |
|
1050 | 1050 | etype, evalue = str, sys.exc_info()[:2] |
|
1051 | 1051 | etype_str, evalue_str = map(str, (etype, evalue)) |
|
1052 | 1052 | # ... and format it |
|
1053 | 1053 | return ['%s%s%s: %s' % (colors.excName, etype_str, |
|
1054 | 1054 | colorsnormal, py3compat.cast_unicode(evalue_str))] |
|
1055 | 1055 | |
|
1056 | 1056 | def format_exception_as_a_whole(self, etype, evalue, etb, number_of_lines_of_context, tb_offset): |
|
1057 | 1057 | """Formats the header, traceback and exception message for a single exception. |
|
1058 | 1058 | |
|
1059 | 1059 | This may be called multiple times by Python 3 exception chaining |
|
1060 | 1060 | (PEP 3134). |
|
1061 | 1061 | """ |
|
1062 | 1062 | # some locals |
|
1063 | 1063 | orig_etype = etype |
|
1064 | 1064 | try: |
|
1065 | 1065 | etype = etype.__name__ |
|
1066 | 1066 | except AttributeError: |
|
1067 | 1067 | pass |
|
1068 | 1068 | |
|
1069 | 1069 | tb_offset = self.tb_offset if tb_offset is None else tb_offset |
|
1070 | 1070 | head = self.prepare_header(etype, self.long_header) |
|
1071 | 1071 | records = self.get_records(etb, number_of_lines_of_context, tb_offset) |
|
1072 | 1072 | |
|
1073 | 1073 | if records is None: |
|
1074 | 1074 | return "" |
|
1075 | 1075 | |
|
1076 | 1076 | last_unique, recursion_repeat = find_recursion(orig_etype, evalue, records) |
|
1077 | 1077 | |
|
1078 | 1078 | frames = self.format_records(records, last_unique, recursion_repeat) |
|
1079 | 1079 | |
|
1080 | 1080 | formatted_exception = self.format_exception(etype, evalue) |
|
1081 | 1081 | if records: |
|
1082 | 1082 | filepath, lnum = records[-1][1:3] |
|
1083 | 1083 | filepath = os.path.abspath(filepath) |
|
1084 | 1084 | ipinst = get_ipython() |
|
1085 | 1085 | if ipinst is not None: |
|
1086 | 1086 | ipinst.hooks.synchronize_with_editor(filepath, lnum, 0) |
|
1087 | 1087 | |
|
1088 | 1088 | return [[head] + frames + [''.join(formatted_exception[0])]] |
|
1089 | 1089 | |
|
1090 | 1090 | def get_records(self, etb, number_of_lines_of_context, tb_offset): |
|
1091 | 1091 | try: |
|
1092 | 1092 | # Try the default getinnerframes and Alex's: Alex's fixes some |
|
1093 | 1093 | # problems, but it generates empty tracebacks for console errors |
|
1094 | 1094 | # (5 blank lines) where none should be returned.
|
1095 | 1095 | return _fixed_getinnerframes(etb, number_of_lines_of_context, tb_offset) |
|
1096 | 1096 | except UnicodeDecodeError: |
|
1097 | 1097 | # This can occur if a file's encoding magic comment is wrong. |
|
1098 | 1098 | # I can't see a way to recover without duplicating a bunch of code |
|
1099 | 1099 | # from the stdlib traceback module. --TK |
|
1100 | 1100 | error('\nUnicodeDecodeError while processing traceback.\n') |
|
1101 | 1101 | return None |
|
1102 | 1102 | except: |
|
1103 | 1103 | # FIXME: I've been getting many crash reports from python 2.3 |
|
1104 | 1104 | # users, traceable to inspect.py. If I can find a small test-case |
|
1105 | 1105 | # to reproduce this, I should either write a better workaround or |
|
1106 | 1106 | # file a bug report against inspect (if that's the real problem). |
|
1107 | 1107 | # So far, I haven't been able to find an isolated example to |
|
1108 | 1108 | # reproduce the problem. |
|
1109 | 1109 | inspect_error() |
|
1110 | 1110 | traceback.print_exc(file=self.ostream) |
|
1111 | 1111 | info('\nUnfortunately, your original traceback can not be constructed.\n') |
|
1112 | 1112 | return None |
|
1113 | 1113 | |
|
1114 | 1114 | def get_parts_of_chained_exception(self, evalue): |
|
1115 | 1115 | def get_chained_exception(exception_value): |
|
1116 | 1116 | cause = getattr(exception_value, '__cause__', None) |
|
1117 | 1117 | if cause: |
|
1118 | 1118 | return cause |
|
1119 | 1119 | if getattr(exception_value, '__suppress_context__', False): |
|
1120 | 1120 | return None |
|
1121 | 1121 | return getattr(exception_value, '__context__', None) |
|
1122 | 1122 | |
|
1123 | 1123 | chained_evalue = get_chained_exception(evalue) |
|
1124 | 1124 | |
|
1125 | 1125 | if chained_evalue: |
|
1126 | 1126 | return chained_evalue.__class__, chained_evalue, chained_evalue.__traceback__ |
|
1127 | 1127 | |
|
1128 | 1128 | def structured_traceback(self, etype, evalue, etb, tb_offset=None, |
|
1129 | 1129 | number_of_lines_of_context=5): |
|
1130 | 1130 | """Return a nice text document describing the traceback.""" |
|
1131 | 1131 | |
|
1132 | 1132 | formatted_exception = self.format_exception_as_a_whole(etype, evalue, etb, number_of_lines_of_context, |
|
1133 | 1133 | tb_offset) |
|
1134 | 1134 | |
|
1135 | 1135 | colors = self.Colors # just a shorthand + quicker name lookup |
|
1136 | 1136 | colorsnormal = colors.Normal # used a lot |
|
1137 | 1137 | head = '%s%s%s' % (colors.topline, '-' * min(75, get_terminal_size()[0]), colorsnormal) |
|
1138 | 1138 | structured_traceback_parts = [head] |
|
1139 | if py3compat.PY3: | |
|
1140 | chained_exceptions_tb_offset = 0 | |
|
1141 | lines_of_context = 3 | |
|
1142 | formatted_exceptions = formatted_exception | |
|
1139 | chained_exceptions_tb_offset = 0 | |
|
1140 | lines_of_context = 3 | |
|
1141 | formatted_exceptions = formatted_exception | |
|
1142 | exception = self.get_parts_of_chained_exception(evalue) | |
|
1143 | if exception: | |
|
1144 | formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__) | |
|
1145 | etype, evalue, etb = exception | |
|
1146 | else: | |
|
1147 | evalue = None | |
|
1148 | chained_exc_ids = set() | |
|
1149 | while evalue: | |
|
1150 | formatted_exceptions += self.format_exception_as_a_whole(etype, evalue, etb, lines_of_context, | |
|
1151 | chained_exceptions_tb_offset) | |
|
1143 | 1152 | exception = self.get_parts_of_chained_exception(evalue) |
|
1144 | if exception: | |
|
1153 | ||
|
1154 | if exception and not id(exception[1]) in chained_exc_ids: | |
|
1155 | chained_exc_ids.add(id(exception[1])) # trace exception to avoid infinite 'cause' loop | |
|
1145 | 1156 | formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__) |
|
1146 | 1157 | etype, evalue, etb = exception |
|
1147 | 1158 | else: |
|
1148 | 1159 | evalue = None |
|
1149 | chained_exc_ids = set() | |
|
1150 | while evalue: | |
|
1151 | formatted_exceptions += self.format_exception_as_a_whole(etype, evalue, etb, lines_of_context, | |
|
1152 | chained_exceptions_tb_offset) | |
|
1153 | exception = self.get_parts_of_chained_exception(evalue) | |
|
1154 | ||
|
1155 | if exception and not id(exception[1]) in chained_exc_ids: | |
|
1156 | chained_exc_ids.add(id(exception[1])) # trace exception to avoid infinite 'cause' loop | |
|
1157 | formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__) | |
|
1158 | etype, evalue, etb = exception | |
|
1159 | else: | |
|
1160 | evalue = None | |
|
1161 | 1160 | |
|
1162 | 

1163 | 

1164 | 

1165 | 

1166 | else: | |
|
1167 | structured_traceback_parts += formatted_exception[0] | |
|
1161 | # we want to see exceptions in a reversed order: | |
|
1162 | # the first exception should be on top | |
|
1163 | for formatted_exception in reversed(formatted_exceptions): | |
|
1164 | structured_traceback_parts += formatted_exception | |
|
1168 | 1165 | |
|
1169 | 1166 | return structured_traceback_parts |
|
1170 | 1167 | |
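The chain walking added above follows Python's own exception-chaining precedence (explicit __cause__ first, then implicit __context__ unless suppressed), with chained_exc_ids guarding against cycles. A standalone sketch of that selection rule (illustration only, not taken from the changeset):

# Sketch only: the precedence get_parts_of_chained_exception relies on.
def next_in_chain(exc):
    if exc.__cause__ is not None:       # explicit 'raise X from Y'
        return exc.__cause__
    if exc.__suppress_context__:        # 'raise X from None'
        return None
    return exc.__context__              # implicit chaining

try:
    try:
        raise KeyError("inner")
    except KeyError as inner:
        raise ValueError("outer") from inner
except ValueError as outer:
    assert isinstance(next_in_chain(outer), KeyError)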
|
1171 | 1168 | def debugger(self, force=False): |
|
1172 | 1169 | """Call up the pdb debugger if desired, always clean up the tb |
|
1173 | 1170 | reference. |
|
1174 | 1171 | |
|
1175 | 1172 | Keywords: |
|
1176 | 1173 | |
|
1177 | 1174 | - force(False): by default, this routine checks the instance call_pdb |
|
1178 | 1175 | flag and does not actually invoke the debugger if the flag is false. |
|
1179 | 1176 | The 'force' option forces the debugger to activate even if the flag |
|
1180 | 1177 | is false. |
|
1181 | 1178 | |
|
1182 | 1179 | If the call_pdb flag is set, the pdb interactive debugger is |
|
1183 | 1180 | invoked. In all cases, the self.tb reference to the current traceback |
|
1184 | 1181 | is deleted to prevent lingering references which hamper memory |
|
1185 | 1182 | management. |
|
1186 | 1183 | |
|
1187 | 1184 | Note that each call to pdb() does an 'import readline', so if your app |
|
1188 | 1185 | requires a special setup for the readline completers, you'll have to |
|
1189 | 1186 | fix that by hand after invoking the exception handler.""" |
|
1190 | 1187 | |
|
1191 | 1188 | if force or self.call_pdb: |
|
1192 | 1189 | if self.pdb is None: |
|
1193 | 1190 | self.pdb = self.debugger_cls() |
|
1194 | 1191 | # the system displayhook may have changed, restore the original |
|
1195 | 1192 | # for pdb |
|
1196 | 1193 | display_trap = DisplayTrap(hook=sys.__displayhook__) |
|
1197 | 1194 | with display_trap: |
|
1198 | 1195 | self.pdb.reset() |
|
1199 | 1196 | # Find the right frame so we don't pop up inside ipython itself |
|
1200 | 1197 | if hasattr(self, 'tb') and self.tb is not None: |
|
1201 | 1198 | etb = self.tb |
|
1202 | 1199 | else: |
|
1203 | 1200 | etb = self.tb = sys.last_traceback |
|
1204 | 1201 | while self.tb is not None and self.tb.tb_next is not None: |
|
1205 | 1202 | self.tb = self.tb.tb_next |
|
1206 | 1203 | if etb and etb.tb_next: |
|
1207 | 1204 | etb = etb.tb_next |
|
1208 | 1205 | self.pdb.botframe = etb.tb_frame |
|
1209 | 1206 | self.pdb.interaction(self.tb.tb_frame, self.tb) |
|
1210 | 1207 | |
|
1211 | 1208 | if hasattr(self, 'tb'): |
|
1212 | 1209 | del self.tb |
|
1213 | 1210 | |
|
1214 | 1211 | def handler(self, info=None): |
|
1215 | 1212 | (etype, evalue, etb) = info or sys.exc_info() |
|
1216 | 1213 | self.tb = etb |
|
1217 | 1214 | ostream = self.ostream |
|
1218 | 1215 | ostream.flush() |
|
1219 | 1216 | ostream.write(self.text(etype, evalue, etb)) |
|
1220 | 1217 | ostream.write('\n') |
|
1221 | 1218 | ostream.flush() |
|
1222 | 1219 | |
|
1223 | 1220 | # Changed so an instance can just be called as VerboseTB_inst() and print |
|
1224 | 1221 | # out the right info on its own. |
|
1225 | 1222 | def __call__(self, etype=None, evalue=None, etb=None): |
|
1226 | 1223 | """This hook can replace sys.excepthook (for Python 2.1 or higher).""" |
|
1227 | 1224 | if etb is None: |
|
1228 | 1225 | self.handler() |
|
1229 | 1226 | else: |
|
1230 | 1227 | self.handler((etype, evalue, etb)) |
|
1231 | 1228 | try: |
|
1232 | 1229 | self.debugger() |
|
1233 | 1230 | except KeyboardInterrupt: |
|
1234 | 1231 | print("\nKeyboardInterrupt") |
|
1235 | 1232 | |
|
1236 | 1233 | |
|
1237 | 1234 | #---------------------------------------------------------------------------- |
|
1238 | 1235 | class FormattedTB(VerboseTB, ListTB): |
|
1239 | 1236 | """Subclass ListTB but allow calling with a traceback. |
|
1240 | 1237 | |
|
1241 | 1238 | It can thus be used as a sys.excepthook for Python > 2.1. |
|
1242 | 1239 | |
|
1243 | 1240 | Also adds 'Context' and 'Verbose' modes, not available in ListTB. |
|
1244 | 1241 | |
|
1245 | 1242 | Allows a tb_offset to be specified. This is useful for situations where |
|
1246 | 1243 | one needs to remove a number of topmost frames from the traceback (such as |
|
1247 | 1244 | occurs with python programs that themselves execute other python code, |
|
1248 | 1245 | like Python shells). """ |
|
1249 | 1246 | |
|
1250 | 1247 | def __init__(self, mode='Plain', color_scheme='Linux', call_pdb=False, |
|
1251 | 1248 | ostream=None, |
|
1252 | 1249 | tb_offset=0, long_header=False, include_vars=False, |
|
1253 | 1250 | check_cache=None, debugger_cls=None, |
|
1254 | 1251 | parent=None, config=None): |
|
1255 | 1252 | |
|
1256 | 1253 | # NEVER change the order of this list. Put new modes at the end: |
|
1257 | 1254 | self.valid_modes = ['Plain', 'Context', 'Verbose'] |
|
1258 | 1255 | self.verbose_modes = self.valid_modes[1:3] |
|
1259 | 1256 | |
|
1260 | 1257 | VerboseTB.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb, |
|
1261 | 1258 | ostream=ostream, tb_offset=tb_offset, |
|
1262 | 1259 | long_header=long_header, include_vars=include_vars, |
|
1263 | 1260 | check_cache=check_cache, debugger_cls=debugger_cls, |
|
1264 | 1261 | parent=parent, config=config) |
|
1265 | 1262 | |
|
1266 | 1263 | # Different types of tracebacks are joined with different separators to |
|
1267 | 1264 | # form a single string. They are taken from this dict |
|
1268 | 1265 | self._join_chars = dict(Plain='', Context='\n', Verbose='\n') |
|
1269 | 1266 | # set_mode also sets the tb_join_char attribute |
|
1270 | 1267 | self.set_mode(mode) |
|
1271 | 1268 | |
|
1272 | 1269 | def _extract_tb(self, tb): |
|
1273 | 1270 | if tb: |
|
1274 | 1271 | return traceback.extract_tb(tb) |
|
1275 | 1272 | else: |
|
1276 | 1273 | return None |
|
1277 | 1274 | |
|
1278 | 1275 | def structured_traceback(self, etype, value, tb, tb_offset=None, number_of_lines_of_context=5): |
|
1279 | 1276 | tb_offset = self.tb_offset if tb_offset is None else tb_offset |
|
1280 | 1277 | mode = self.mode |
|
1281 | 1278 | if mode in self.verbose_modes: |
|
1282 | 1279 | # Verbose modes need a full traceback |
|
1283 | 1280 | return VerboseTB.structured_traceback( |
|
1284 | 1281 | self, etype, value, tb, tb_offset, number_of_lines_of_context |
|
1285 | 1282 | ) |
|
1286 | 1283 | else: |
|
1287 | 1284 | # We must check the source cache because otherwise we can print |
|
1288 | 1285 | # out-of-date source code. |
|
1289 | 1286 | self.check_cache() |
|
1290 | 1287 | # Now we can extract and format the exception |
|
1291 | 1288 | elist = self._extract_tb(tb) |
|
1292 | 1289 | return ListTB.structured_traceback( |
|
1293 | 1290 | self, etype, value, elist, tb_offset, number_of_lines_of_context |
|
1294 | 1291 | ) |
|
1295 | 1292 | |
|
1296 | 1293 | def stb2text(self, stb): |
|
1297 | 1294 | """Convert a structured traceback (a list) to a string.""" |
|
1298 | 1295 | return self.tb_join_char.join(stb) |
|
1299 | 1296 | |
|
1300 | 1297 | |
|
1301 | 1298 | def set_mode(self, mode=None): |
|
1302 | 1299 | """Switch to the desired mode. |
|
1303 | 1300 | |
|
1304 | 1301 | If mode is not specified, cycles through the available modes.""" |
|
1305 | 1302 | |
|
1306 | 1303 | if not mode: |
|
1307 | 1304 | new_idx = (self.valid_modes.index(self.mode) + 1 ) % \ |
|
1308 | 1305 | len(self.valid_modes) |
|
1309 | 1306 | self.mode = self.valid_modes[new_idx] |
|
1310 | 1307 | elif mode not in self.valid_modes: |
|
1311 | 1308 | raise ValueError('Unrecognized mode in FormattedTB: <' + mode + '>\n' |
|
1312 | 1309 | 'Valid modes: ' + str(self.valid_modes)) |
|
1313 | 1310 | else: |
|
1314 | 1311 | self.mode = mode |
|
1315 | 1312 | # include variable details only in 'Verbose' mode |
|
1316 | 1313 | self.include_vars = (self.mode == self.valid_modes[2]) |
|
1317 | 1314 | # Set the join character for generating text tracebacks |
|
1318 | 1315 | self.tb_join_char = self._join_chars[self.mode] |
|
1319 | 1316 | |
|
1320 | 1317 | # some convenient shortcuts |
|
1321 | 1318 | def plain(self): |
|
1322 | 1319 | self.set_mode(self.valid_modes[0]) |
|
1323 | 1320 | |
|
1324 | 1321 | def context(self): |
|
1325 | 1322 | self.set_mode(self.valid_modes[1]) |
|
1326 | 1323 | |
|
1327 | 1324 | def verbose(self): |
|
1328 | 1325 | self.set_mode(self.valid_modes[2]) |
|
1329 | 1326 | |
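A short usage sketch of the mode handling above (not part of the changeset): set_mode() with no argument cycles Plain -> Context -> Verbose, and the plain()/context()/verbose() shortcuts select a mode explicitly.

from IPython.core.ultratb import FormattedTB

# Sketch only: cycling and selecting FormattedTB modes.
ftb = FormattedTB(mode='Plain', color_scheme='NoColor')
ftb.set_mode()            # no argument: cycles to 'Context'
ftb.set_mode()            # cycles to 'Verbose' (include_vars becomes True)
ftb.plain()               # shortcut back to 'Plain'
ftb.set_mode('Verbose')   # explicit selection; unknown names raise ValueError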
|
1330 | 1327 | |
|
1331 | 1328 | #---------------------------------------------------------------------------- |
|
1332 | 1329 | class AutoFormattedTB(FormattedTB): |
|
1333 | 1330 | """A traceback printer which can be called on the fly. |
|
1334 | 1331 | |
|
1335 | 1332 | It will find out about exceptions by itself. |
|
1336 | 1333 | |
|
1337 | 1334 | A brief example:: |
|
1338 | 1335 | |
|
1339 | 1336 | AutoTB = AutoFormattedTB(mode = 'Verbose',color_scheme='Linux') |
|
1340 | 1337 | try: |
|
1341 | 1338 | ... |
|
1342 | 1339 | except: |
|
1343 | 1340 | AutoTB() # or AutoTB(out=logfile) where logfile is an open file object |
|
1344 | 1341 | """ |
|
1345 | 1342 | |
|
1346 | 1343 | def __call__(self, etype=None, evalue=None, etb=None, |
|
1347 | 1344 | out=None, tb_offset=None): |
|
1348 | 1345 | """Print out a formatted exception traceback. |
|
1349 | 1346 | |
|
1350 | 1347 | Optional arguments: |
|
1351 | 1348 | - out: an open file-like object to direct output to. |
|
1352 | 1349 | |
|
1353 | 1350 | - tb_offset: the number of frames to skip over in the stack, on a |
|
1354 | 1351 | per-call basis (this temporarily overrides the instance's tb_offset

1355 | 1352 | given at initialization time)."""
|
1356 | 1353 | |
|
1357 | 1354 | if out is None: |
|
1358 | 1355 | out = self.ostream |
|
1359 | 1356 | out.flush() |
|
1360 | 1357 | out.write(self.text(etype, evalue, etb, tb_offset)) |
|
1361 | 1358 | out.write('\n') |
|
1362 | 1359 | out.flush() |
|
1363 | 1360 | # FIXME: we should remove the auto pdb behavior from here and leave |
|
1364 | 1361 | # that to the clients. |
|
1365 | 1362 | try: |
|
1366 | 1363 | self.debugger() |
|
1367 | 1364 | except KeyboardInterrupt: |
|
1368 | 1365 | print("\nKeyboardInterrupt") |
|
1369 | 1366 | |
|
1370 | 1367 | def structured_traceback(self, etype=None, value=None, tb=None, |
|
1371 | 1368 | tb_offset=None, number_of_lines_of_context=5): |
|
1372 | 1369 | if etype is None: |
|
1373 | 1370 | etype, value, tb = sys.exc_info() |
|
1374 | 1371 | self.tb = tb |
|
1375 | 1372 | return FormattedTB.structured_traceback( |
|
1376 | 1373 | self, etype, value, tb, tb_offset, number_of_lines_of_context) |
|
1377 | 1374 | |
|
1378 | 1375 | |
|
1379 | 1376 | #--------------------------------------------------------------------------- |
|
1380 | 1377 | |
|
1381 | 1378 | # A simple class to preserve Nathan's original functionality. |
|
1382 | 1379 | class ColorTB(FormattedTB): |
|
1383 | 1380 | """Shorthand to initialize a FormattedTB in Linux colors mode.""" |
|
1384 | 1381 | |
|
1385 | 1382 | def __init__(self, color_scheme='Linux', call_pdb=0, **kwargs): |
|
1386 | 1383 | FormattedTB.__init__(self, color_scheme=color_scheme, |
|
1387 | 1384 | call_pdb=call_pdb, **kwargs) |
|
1388 | 1385 | |
|
1389 | 1386 | |
|
1390 | 1387 | class SyntaxTB(ListTB): |
|
1391 | 1388 | """Extension which holds some state: the last exception value""" |
|
1392 | 1389 | |
|
1393 | 1390 | def __init__(self, color_scheme='NoColor', parent=None, config=None): |
|
1394 | 1391 | ListTB.__init__(self, color_scheme, parent=parent, config=config) |
|
1395 | 1392 | self.last_syntax_error = None |
|
1396 | 1393 | |
|
1397 | 1394 | def __call__(self, etype, value, elist): |
|
1398 | 1395 | self.last_syntax_error = value |
|
1399 | 1396 | |
|
1400 | 1397 | ListTB.__call__(self, etype, value, elist) |
|
1401 | 1398 | |
|
1402 | 1399 | def structured_traceback(self, etype, value, elist, tb_offset=None, |
|
1403 | 1400 | context=5): |
|
1404 | 1401 | # If the source file has been edited, the line in the syntax error can |
|
1405 | 1402 | # be wrong (retrieved from an outdated cache). This replaces it with |
|
1406 | 1403 | # the current value. |
|
1407 | 1404 | if isinstance(value, SyntaxError) \ |
|
1408 | 1405 | and isinstance(value.filename, str) \ |
|
1409 | 1406 | and isinstance(value.lineno, int): |
|
1410 | 1407 | linecache.checkcache(value.filename) |
|
1411 | 1408 | newtext = linecache.getline(value.filename, value.lineno) |
|
1412 | 1409 | if newtext: |
|
1413 | 1410 | value.text = newtext |
|
1414 | 1411 | self.last_syntax_error = value |
|
1415 | 1412 | return super(SyntaxTB, self).structured_traceback(etype, value, elist, |
|
1416 | 1413 | tb_offset=tb_offset, context=context) |
|
1417 | 1414 | |
|
1418 | 1415 | def clear_err_state(self): |
|
1419 | 1416 | """Return the current error state and clear it""" |
|
1420 | 1417 | e = self.last_syntax_error |
|
1421 | 1418 | self.last_syntax_error = None |
|
1422 | 1419 | return e |
|
1423 | 1420 | |
|
1424 | 1421 | def stb2text(self, stb): |
|
1425 | 1422 | """Convert a structured traceback (a list) to a string.""" |
|
1426 | 1423 | return ''.join(stb) |
|
1427 | 1424 | |
|
1428 | 1425 | |
|
1429 | 1426 | # some internal-use functions |
|
1430 | 1427 | def text_repr(value): |
|
1431 | 1428 | """Hopefully pretty robust repr equivalent.""" |
|
1432 | 1429 | # this is pretty horrible but should always return *something* |
|
1433 | 1430 | try: |
|
1434 | 1431 | return pydoc.text.repr(value) |
|
1435 | 1432 | except KeyboardInterrupt: |
|
1436 | 1433 | raise |
|
1437 | 1434 | except: |
|
1438 | 1435 | try: |
|
1439 | 1436 | return repr(value) |
|
1440 | 1437 | except KeyboardInterrupt: |
|
1441 | 1438 | raise |
|
1442 | 1439 | except: |
|
1443 | 1440 | try: |
|
1444 | 1441 | # all still in an except block so we catch |
|
1445 | 1442 | # getattr raising |
|
1446 | 1443 | name = getattr(value, '__name__', None) |
|
1447 | 1444 | if name: |
|
1448 | 1445 | # ick, recursion |
|
1449 | 1446 | return text_repr(name) |
|
1450 | 1447 | klass = getattr(value, '__class__', None) |
|
1451 | 1448 | if klass: |
|
1452 | 1449 | return '%s instance' % text_repr(klass) |
|
1453 | 1450 | except KeyboardInterrupt: |
|
1454 | 1451 | raise |
|
1455 | 1452 | except: |
|
1456 | 1453 | return 'UNRECOVERABLE REPR FAILURE' |
|
1457 | 1454 | |
|
1458 | 1455 | |
|
1459 | 1456 | def eqrepr(value, repr=text_repr): |
|
1460 | 1457 | return '=%s' % repr(value) |
|
1461 | 1458 | |
|
1462 | 1459 | |
|
1463 | 1460 | def nullrepr(value, repr=text_repr): |
|
1464 | 1461 | return '' |
@@ -1,525 +1,524 b'' | |||
|
1 | 1 | """IPython extension to reload modules before executing user code. |
|
2 | 2 | |
|
3 | 3 | ``autoreload`` reloads modules automatically before entering the execution of |
|
4 | 4 | code typed at the IPython prompt. |
|
5 | 5 | |
|
6 | 6 | This makes for example the following workflow possible: |
|
7 | 7 | |
|
8 | 8 | .. sourcecode:: ipython |
|
9 | 9 | |
|
10 | 10 | In [1]: %load_ext autoreload |
|
11 | 11 | |
|
12 | 12 | In [2]: %autoreload 2 |
|
13 | 13 | |
|
14 | 14 | In [3]: from foo import some_function |
|
15 | 15 | |
|
16 | 16 | In [4]: some_function() |
|
17 | 17 | Out[4]: 42 |
|
18 | 18 | |
|
19 | 19 | In [5]: # open foo.py in an editor and change some_function to return 43 |
|
20 | 20 | |
|
21 | 21 | In [6]: some_function() |
|
22 | 22 | Out[6]: 43 |
|
23 | 23 | |
|
24 | 24 | The module was reloaded without reloading it explicitly, and the object |
|
25 | 25 | imported with ``from foo import ...`` was also updated. |
|
26 | 26 | |
|
27 | 27 | Usage |
|
28 | 28 | ===== |
|
29 | 29 | |
|
30 | 30 | The following magic commands are provided: |
|
31 | 31 | |
|
32 | 32 | ``%autoreload`` |
|
33 | 33 | |
|
34 | 34 | Reload all modules (except those excluded by ``%aimport``) |
|
35 | 35 | automatically now. |
|
36 | 36 | |
|
37 | 37 | ``%autoreload 0`` |
|
38 | 38 | |
|
39 | 39 | Disable automatic reloading. |
|
40 | 40 | |
|
41 | 41 | ``%autoreload 1`` |
|
42 | 42 | |
|
43 | 43 | Reload all modules imported with ``%aimport`` every time before |
|
44 | 44 | executing the Python code typed. |
|
45 | 45 | |
|
46 | 46 | ``%autoreload 2`` |
|
47 | 47 | |
|
48 | 48 | Reload all modules (except those excluded by ``%aimport``) every |
|
49 | 49 | time before executing the Python code typed. |
|
50 | 50 | |
|
51 | 51 | ``%aimport`` |
|
52 | 52 | |
|
53 | 53 | List modules which are to be automatically imported or not to be imported. |
|
54 | 54 | |
|
55 | 55 | ``%aimport foo`` |
|
56 | 56 | |
|
57 | 57 | Import module 'foo' and mark it to be autoreloaded for ``%autoreload 1`` |
|
58 | 58 | |
|
59 | 59 | ``%aimport foo, bar`` |
|
60 | 60 | |
|
61 | 61 | Import modules 'foo', 'bar' and mark them to be autoreloaded for ``%autoreload 1`` |
|
62 | 62 | |
|
63 | 63 | ``%aimport -foo`` |
|
64 | 64 | |
|
65 | 65 | Mark module 'foo' to not be autoreloaded. |
|
66 | 66 | |
|
67 | 67 | Caveats |
|
68 | 68 | ======= |
|
69 | 69 | |
|
70 | 70 | Reloading Python modules in a reliable way is in general difficult, |
|
71 | 71 | and unexpected things may occur. ``%autoreload`` tries to work around |
|
72 | 72 | common pitfalls by replacing function code objects and parts of |
|
73 | 73 | classes previously in the module with new versions. This makes the |
|
74 | 74 | following things to work: |
|
75 | 75 | |
|
76 | 76 | - Functions and classes imported via 'from xxx import foo' are upgraded |
|
77 | 77 | to new versions when 'xxx' is reloaded. |
|
78 | 78 | |
|
79 | 79 | - Methods and properties of classes are upgraded on reload, so that |
|
80 | 80 | calling 'c.foo()' on an object 'c' created before the reload causes |
|
81 | 81 | the new code for 'foo' to be executed. |
|
82 | 82 | |
|
83 | 83 | Some of the known remaining caveats are: |
|
84 | 84 | |
|
85 | 85 | - Replacing code objects does not always succeed: changing a @property |
|
86 | 86 | in a class to an ordinary method or a method to a member variable |
|
87 | 87 | can cause problems (but in old objects only). |
|
88 | 88 | |
|
89 | 89 | - Functions that are removed (eg. via monkey-patching) from a module |
|
90 | 90 | before it is reloaded are not upgraded. |
|
91 | 91 | |
|
92 | 92 | - C extension modules cannot be reloaded, and so cannot be autoreloaded. |
|
93 | 93 | """ |
|
94 | 94 | |
|
95 | 95 | skip_doctest = True |
|
96 | 96 | |
|
97 | 97 | #----------------------------------------------------------------------------- |
|
98 | 98 | # Copyright (C) 2000 Thomas Heller |
|
99 | 99 | # Copyright (C) 2008 Pauli Virtanen <pav@iki.fi> |
|
100 | 100 | # Copyright (C) 2012 The IPython Development Team |
|
101 | 101 | # |
|
102 | 102 | # Distributed under the terms of the BSD License. The full license is in |
|
103 | 103 | # the file COPYING, distributed as part of this software. |
|
104 | 104 | #----------------------------------------------------------------------------- |
|
105 | 105 | # |
|
106 | 106 | # This IPython module is written by Pauli Virtanen, based on the autoreload |
|
107 | 107 | # code by Thomas Heller. |
|
108 | 108 | |
|
109 | 109 | #----------------------------------------------------------------------------- |
|
110 | 110 | # Imports |
|
111 | 111 | #----------------------------------------------------------------------------- |
|
112 | 112 | |
|
113 | 113 | import os |
|
114 | 114 | import sys |
|
115 | 115 | import traceback |
|
116 | 116 | import types |
|
117 | 117 | import weakref |
|
118 | 118 | from importlib import import_module |
|
119 | from IPython.utils.py3compat import PY3 | |
|
120 | 119 | from imp import reload |
|
121 | 120 | |
|
122 | 121 | from IPython.utils import openpy |
|
123 | 122 | |
|
124 | 123 | #------------------------------------------------------------------------------ |
|
125 | 124 | # Autoreload functionality |
|
126 | 125 | #------------------------------------------------------------------------------ |
|
127 | 126 | |
|
128 | 127 | class ModuleReloader(object): |
|
129 | 128 | enabled = False |
|
130 | 129 | """Whether this reloader is enabled""" |
|
131 | 130 | |
|
132 | 131 | check_all = True |
|
133 | 132 | """Autoreload all modules, not just those listed in 'modules'""" |
|
134 | 133 | |
|
135 | 134 | def __init__(self): |
|
136 | 135 | # Modules that failed to reload: {module: mtime-on-failed-reload, ...} |
|
137 | 136 | self.failed = {} |
|
138 | 137 | # Modules specially marked as autoreloadable. |
|
139 | 138 | self.modules = {} |
|
140 | 139 | # Modules specially marked as not autoreloadable. |
|
141 | 140 | self.skip_modules = {} |
|
142 | 141 | # (module-name, name) -> weakref, for replacing old code objects |
|
143 | 142 | self.old_objects = {} |
|
144 | 143 | # Module modification timestamps |
|
145 | 144 | self.modules_mtimes = {} |
|
146 | 145 | |
|
147 | 146 | # Cache module modification times |
|
148 | 147 | self.check(check_all=True, do_reload=False) |
|
149 | 148 | |
|
150 | 149 | def mark_module_skipped(self, module_name): |
|
151 | 150 | """Skip reloading the named module in the future""" |
|
152 | 151 | try: |
|
153 | 152 | del self.modules[module_name] |
|
154 | 153 | except KeyError: |
|
155 | 154 | pass |
|
156 | 155 | self.skip_modules[module_name] = True |
|
157 | 156 | |
|
158 | 157 | def mark_module_reloadable(self, module_name): |
|
159 | 158 | """Reload the named module in the future (if it is imported)""" |
|
160 | 159 | try: |
|
161 | 160 | del self.skip_modules[module_name] |
|
162 | 161 | except KeyError: |
|
163 | 162 | pass |
|
164 | 163 | self.modules[module_name] = True |
|
165 | 164 | |
|
166 | 165 | def aimport_module(self, module_name): |
|
167 | 166 | """Import a module, and mark it reloadable |
|
168 | 167 | |
|
169 | 168 | Returns |
|
170 | 169 | ------- |
|
171 | 170 | top_module : module |
|
172 | 171 | The imported module if it is top-level, otherwise the top-level package containing it

173 | 172 | top_name : str
|
174 | 173 | Name of top_module |
|
175 | 174 | |
|
176 | 175 | """ |
|
177 | 176 | self.mark_module_reloadable(module_name) |
|
178 | 177 | |
|
179 | 178 | import_module(module_name) |
|
180 | 179 | top_name = module_name.split('.')[0] |
|
181 | 180 | top_module = sys.modules[top_name] |
|
182 | 181 | return top_module, top_name |
|
183 | 182 | |
|
184 | 183 | def filename_and_mtime(self, module): |
|
185 | 184 | if not hasattr(module, '__file__') or module.__file__ is None: |
|
186 | 185 | return None, None |
|
187 | 186 | |
|
188 | 187 | if getattr(module, '__name__', None) in ['__mp_main__', '__main__']: |
|
189 | 188 | # we cannot reload(__main__) or reload(__mp_main__) |
|
190 | 189 | return None, None |
|
191 | 190 | |
|
192 | 191 | filename = module.__file__ |
|
193 | 192 | path, ext = os.path.splitext(filename) |
|
194 | 193 | |
|
195 | 194 | if ext.lower() == '.py': |
|
196 | 195 | py_filename = filename |
|
197 | 196 | else: |
|
198 | 197 | try: |
|
199 | 198 | py_filename = openpy.source_from_cache(filename) |
|
200 | 199 | except ValueError: |
|
201 | 200 | return None, None |
|
202 | 201 | |
|
203 | 202 | try: |
|
204 | 203 | pymtime = os.stat(py_filename).st_mtime |
|
205 | 204 | except OSError: |
|
206 | 205 | return None, None |
|
207 | 206 | |
|
208 | 207 | return py_filename, pymtime |
|
209 | 208 | |
|
210 | 209 | def check(self, check_all=False, do_reload=True): |
|
211 | 210 | """Check whether some modules need to be reloaded.""" |
|
212 | 211 | |
|
213 | 212 | if not self.enabled and not check_all: |
|
214 | 213 | return |
|
215 | 214 | |
|
216 | 215 | if check_all or self.check_all: |
|
217 | 216 | modules = list(sys.modules.keys()) |
|
218 | 217 | else: |
|
219 | 218 | modules = list(self.modules.keys()) |
|
220 | 219 | |
|
221 | 220 | for modname in modules: |
|
222 | 221 | m = sys.modules.get(modname, None) |
|
223 | 222 | |
|
224 | 223 | if modname in self.skip_modules: |
|
225 | 224 | continue |
|
226 | 225 | |
|
227 | 226 | py_filename, pymtime = self.filename_and_mtime(m) |
|
228 | 227 | if py_filename is None: |
|
229 | 228 | continue |
|
230 | 229 | |
|
231 | 230 | try: |
|
232 | 231 | if pymtime <= self.modules_mtimes[modname]: |
|
233 | 232 | continue |
|
234 | 233 | except KeyError: |
|
235 | 234 | self.modules_mtimes[modname] = pymtime |
|
236 | 235 | continue |
|
237 | 236 | else: |
|
238 | 237 | if self.failed.get(py_filename, None) == pymtime: |
|
239 | 238 | continue |
|
240 | 239 | |
|
241 | 240 | self.modules_mtimes[modname] = pymtime |
|
242 | 241 | |
|
243 | 242 | # If we've reached this point, we should try to reload the module |
|
244 | 243 | if do_reload: |
|
245 | 244 | try: |
|
246 | 245 | superreload(m, reload, self.old_objects) |
|
247 | 246 | if py_filename in self.failed: |
|
248 | 247 | del self.failed[py_filename] |
|
249 | 248 | except: |
|
250 | 249 | print("[autoreload of %s failed: %s]" % ( |
|
251 | 250 | modname, traceback.format_exc(10)), file=sys.stderr) |
|
252 | 251 | self.failed[py_filename] = pymtime |
|
253 | 252 | |
|
254 | 253 | #------------------------------------------------------------------------------ |
|
255 | 254 | # superreload |
|
256 | 255 | #------------------------------------------------------------------------------ |
|
257 | 256 | |
|
258 | 257 | |
|
259 | 258 | func_attrs = ['__code__', '__defaults__', '__doc__', |
|
260 | 259 | '__closure__', '__globals__', '__dict__'] |
|
261 | 260 | |
|
262 | 261 | |
|
263 | 262 | def update_function(old, new): |
|
264 | 263 | """Upgrade the code object of a function""" |
|
265 | 264 | for name in func_attrs: |
|
266 | 265 | try: |
|
267 | 266 | setattr(old, name, getattr(new, name)) |
|
268 | 267 | except (AttributeError, TypeError): |
|
269 | 268 | pass |
|
270 | 269 | |
|
271 | 270 | |
|
272 | 271 | def update_class(old, new): |
|
273 | 272 | """Replace items in the __dict__ of a class, and upgrade

274 | 273 | the code objects of its methods"""
|
275 | 274 | for key in list(old.__dict__.keys()): |
|
276 | 275 | old_obj = getattr(old, key) |
|
277 | 276 | try: |
|
278 | 277 | new_obj = getattr(new, key) |
|
279 | 278 | if old_obj == new_obj: |
|
280 | 279 | continue |
|
281 | 280 | except AttributeError: |
|
282 | 281 | # obsolete attribute: remove it |
|
283 | 282 | try: |
|
284 | 283 | delattr(old, key) |
|
285 | 284 | except (AttributeError, TypeError): |
|
286 | 285 | pass |
|
287 | 286 | continue |
|
288 | 287 | |
|
289 | 288 | if update_generic(old_obj, new_obj): continue |
|
290 | 289 | |
|
291 | 290 | try: |
|
292 | 291 | setattr(old, key, getattr(new, key)) |
|
293 | 292 | except (AttributeError, TypeError): |
|
294 | 293 | pass # skip non-writable attributes |
|
295 | 294 | |
|
296 | 295 | |
|
297 | 296 | def update_property(old, new): |
|
298 | 297 | """Replace get/set/del functions of a property""" |
|
299 | 298 | update_generic(old.fdel, new.fdel) |
|
300 | 299 | update_generic(old.fget, new.fget) |
|
301 | 300 | update_generic(old.fset, new.fset) |
|
302 | 301 | |
|
303 | 302 | |
|
304 | 303 | def isinstance2(a, b, typ): |
|
305 | 304 | return isinstance(a, typ) and isinstance(b, typ) |
|
306 | 305 | |
|
307 | 306 | |
|
308 | 307 | UPDATE_RULES = [ |
|
309 | 308 | (lambda a, b: isinstance2(a, b, type), |
|
310 | 309 | update_class), |
|
311 | 310 | (lambda a, b: isinstance2(a, b, types.FunctionType), |
|
312 | 311 | update_function), |
|
313 | 312 | (lambda a, b: isinstance2(a, b, property), |
|
314 | 313 | update_property), |
|
315 | 314 | ] |
|
316 | 315 | UPDATE_RULES.extend([(lambda a, b: isinstance2(a, b, types.MethodType), |
|
317 | 316 | lambda a, b: update_function(a.__func__, b.__func__)), |
|
318 | 317 | ]) |
|
319 | 318 | |
|
320 | 319 | |
|
321 | 320 | def update_generic(a, b): |
|
322 | 321 | for type_check, update in UPDATE_RULES: |
|
323 | 322 | if type_check(a, b): |
|
324 | 323 | update(a, b) |
|
325 | 324 | return True |
|
326 | 325 | return False |
|
327 | 326 | |
|
328 | 327 | |
|
329 | 328 | class StrongRef(object): |
|
330 | 329 | def __init__(self, obj): |
|
331 | 330 | self.obj = obj |
|
332 | 331 | def __call__(self): |
|
333 | 332 | return self.obj |
|
334 | 333 | |
|
335 | 334 | |
|
336 | 335 | def superreload(module, reload=reload, old_objects={}): |
|
337 | 336 | """Enhanced version of the builtin reload function. |
|
338 | 337 | |
|
339 | 338 | superreload remembers objects previously in the module, and |
|
340 | 339 | |
|
341 | 340 | - upgrades the class dictionary of every old class in the module |
|
342 | 341 | - upgrades the code object of every old function and method |
|
343 | 342 | - clears the module's namespace before reloading |
|
344 | 343 | |
|
345 | 344 | """ |
|
346 | 345 | |
|
347 | 346 | # collect old objects in the module |
|
348 | 347 | for name, obj in list(module.__dict__.items()): |
|
349 | 348 | if not hasattr(obj, '__module__') or obj.__module__ != module.__name__: |
|
350 | 349 | continue |
|
351 | 350 | key = (module.__name__, name) |
|
352 | 351 | try: |
|
353 | 352 | old_objects.setdefault(key, []).append(weakref.ref(obj)) |
|
354 | 353 | except TypeError: |
|
355 | 354 | pass |
|
356 | 355 | |
|
357 | 356 | # reload module |
|
358 | 357 | try: |
|
359 | 358 | # clear namespace first from old cruft |
|
360 | 359 | old_dict = module.__dict__.copy() |
|
361 | 360 | old_name = module.__name__ |
|
362 | 361 | module.__dict__.clear() |
|
363 | 362 | module.__dict__['__name__'] = old_name |
|
364 | 363 | module.__dict__['__loader__'] = old_dict['__loader__'] |
|
365 | 364 | except (TypeError, AttributeError, KeyError): |
|
366 | 365 | pass |
|
367 | 366 | |
|
368 | 367 | try: |
|
369 | 368 | module = reload(module) |
|
370 | 369 | except: |
|
371 | 370 | # restore module dictionary on failed reload |
|
372 | 371 | module.__dict__.update(old_dict) |
|
373 | 372 | raise |
|
374 | 373 | |
|
375 | 374 | # iterate over all objects and update functions & classes |
|
376 | 375 | for name, new_obj in list(module.__dict__.items()): |
|
377 | 376 | key = (module.__name__, name) |
|
378 | 377 | if key not in old_objects: continue |
|
379 | 378 | |
|
380 | 379 | new_refs = [] |
|
381 | 380 | for old_ref in old_objects[key]: |
|
382 | 381 | old_obj = old_ref() |
|
383 | 382 | if old_obj is None: continue |
|
384 | 383 | new_refs.append(old_ref) |
|
385 | 384 | update_generic(old_obj, new_obj) |
|
386 | 385 | |
|
387 | 386 | if new_refs: |
|
388 | 387 | old_objects[key] = new_refs |
|
389 | 388 | else: |
|
390 | 389 | del old_objects[key] |
|
391 | 390 | |
|
392 | 391 | return module |
|
393 | 392 | |
|
394 | 393 | #------------------------------------------------------------------------------ |
|
395 | 394 | # IPython connectivity |
|
396 | 395 | #------------------------------------------------------------------------------ |
|
397 | 396 | |
|
398 | 397 | from IPython.core.magic import Magics, magics_class, line_magic |
|
399 | 398 | |
|
400 | 399 | @magics_class |
|
401 | 400 | class AutoreloadMagics(Magics): |
|
402 | 401 | def __init__(self, *a, **kw): |
|
403 | 402 | super(AutoreloadMagics, self).__init__(*a, **kw) |
|
404 | 403 | self._reloader = ModuleReloader() |
|
405 | 404 | self._reloader.check_all = False |
|
406 | 405 | self.loaded_modules = set(sys.modules) |
|
407 | 406 | |
|
408 | 407 | @line_magic |
|
409 | 408 | def autoreload(self, parameter_s=''): |
|
410 | 409 | r"""%autoreload => Reload modules automatically |
|
411 | 410 | |
|
412 | 411 | %autoreload |
|
413 | 412 | Reload all modules (except those excluded by %aimport) automatically |
|
414 | 413 | now. |
|
415 | 414 | |
|
416 | 415 | %autoreload 0 |
|
417 | 416 | Disable automatic reloading. |
|
418 | 417 | |
|
419 | 418 | %autoreload 1 |
|
420 | 419 | Reload all modules imported with %aimport every time before executing |
|
421 | 420 | the Python code typed. |
|
422 | 421 | |
|
423 | 422 | %autoreload 2 |
|
424 | 423 | Reload all modules (except those excluded by %aimport) every time |
|
425 | 424 | before executing the Python code typed. |
|
426 | 425 | |
|
427 | 426 | Reloading Python modules in a reliable way is in general |
|
428 | 427 | difficult, and unexpected things may occur. %autoreload tries to |
|
429 | 428 | work around common pitfalls by replacing function code objects and |
|
430 | 429 | parts of classes previously in the module with new versions. This |
|
431 | 430 | makes the following things work:
|
432 | 431 | |
|
433 | 432 | - Functions and classes imported via 'from xxx import foo' are upgraded |
|
434 | 433 | to new versions when 'xxx' is reloaded. |
|
435 | 434 | |
|
436 | 435 | - Methods and properties of classes are upgraded on reload, so that |
|
437 | 436 | calling 'c.foo()' on an object 'c' created before the reload causes |
|
438 | 437 | the new code for 'foo' to be executed. |
|
439 | 438 | |
|
440 | 439 | Some of the known remaining caveats are: |
|
441 | 440 | |
|
442 | 441 | - Replacing code objects does not always succeed: changing a @property |
|
443 | 442 | in a class to an ordinary method or a method to a member variable |
|
444 | 443 | can cause problems (but in old objects only). |
|
445 | 444 | |
|
446 | 445 | - Functions that are removed (e.g. via monkey-patching) from a module
|
447 | 446 | before it is reloaded are not upgraded. |
|
448 | 447 | |
|
449 | 448 | - C extension modules cannot be reloaded, and so cannot be |
|
450 | 449 | autoreloaded. |
|
451 | 450 | |
|
452 | 451 | """ |
|
453 | 452 | if parameter_s == '': |
|
454 | 453 | self._reloader.check(True) |
|
455 | 454 | elif parameter_s == '0': |
|
456 | 455 | self._reloader.enabled = False |
|
457 | 456 | elif parameter_s == '1': |
|
458 | 457 | self._reloader.check_all = False |
|
459 | 458 | self._reloader.enabled = True |
|
460 | 459 | elif parameter_s == '2': |
|
461 | 460 | self._reloader.check_all = True |
|
462 | 461 | self._reloader.enabled = True |
|
463 | 462 | |
|
464 | 463 | @line_magic |
|
465 | 464 | def aimport(self, parameter_s='', stream=None): |
|
466 | 465 | """%aimport => Import modules for automatic reloading. |
|
467 | 466 | |
|
468 | 467 | %aimport |
|
469 | 468 | List modules to automatically import and not to import. |
|
470 | 469 | |
|
471 | 470 | %aimport foo |
|
472 | 471 | Import module 'foo' and mark it to be autoreloaded for %autoreload 1 |
|
473 | 472 | |
|
474 | 473 | %aimport foo, bar |
|
475 | 474 | Import modules 'foo', 'bar' and mark them to be autoreloaded for %autoreload 1 |
|
476 | 475 | |
|
477 | 476 | %aimport -foo |
|
478 | 477 | Mark module 'foo' to not be autoreloaded for %autoreload 1 |
|
479 | 478 | """ |
|
480 | 479 | modname = parameter_s |
|
481 | 480 | if not modname: |
|
482 | 481 | to_reload = sorted(self._reloader.modules.keys()) |
|
483 | 482 | to_skip = sorted(self._reloader.skip_modules.keys()) |
|
484 | 483 | if stream is None: |
|
485 | 484 | stream = sys.stdout |
|
486 | 485 | if self._reloader.check_all: |
|
487 | 486 | stream.write("Modules to reload:\nall-except-skipped\n") |
|
488 | 487 | else: |
|
489 | 488 | stream.write("Modules to reload:\n%s\n" % ' '.join(to_reload)) |
|
490 | 489 | stream.write("\nModules to skip:\n%s\n" % ' '.join(to_skip)) |
|
491 | 490 | elif modname.startswith('-'): |
|
492 | 491 | modname = modname[1:] |
|
493 | 492 | self._reloader.mark_module_skipped(modname) |
|
494 | 493 | else: |
|
495 | 494 | for _module in ([_.strip() for _ in modname.split(',')]): |
|
496 | 495 | top_module, top_name = self._reloader.aimport_module(_module) |
|
497 | 496 | |
|
498 | 497 | # Inject module to user namespace |
|
499 | 498 | self.shell.push({top_name: top_module}) |
|
500 | 499 | |
|
501 | 500 | def pre_run_cell(self): |
|
502 | 501 | if self._reloader.enabled: |
|
503 | 502 | try: |
|
504 | 503 | self._reloader.check() |
|
505 | 504 | except: |
|
506 | 505 | pass |
|
507 | 506 | |
|
508 | 507 | def post_execute_hook(self): |
|
509 | 508 | """Cache the modification times of any modules imported in this execution |
|
510 | 509 | """ |
|
511 | 510 | newly_loaded_modules = set(sys.modules) - self.loaded_modules |
|
512 | 511 | for modname in newly_loaded_modules: |
|
513 | 512 | _, pymtime = self._reloader.filename_and_mtime(sys.modules[modname]) |
|
514 | 513 | if pymtime is not None: |
|
515 | 514 | self._reloader.modules_mtimes[modname] = pymtime |
|
516 | 515 | |
|
517 | 516 | self.loaded_modules.update(newly_loaded_modules) |
|
518 | 517 | |
|
519 | 518 | |
|
520 | 519 | def load_ipython_extension(ip): |
|
521 | 520 | """Load the extension in IPython.""" |
|
522 | 521 | auto_reload = AutoreloadMagics(ip) |
|
523 | 522 | ip.register_magics(auto_reload) |
|
524 | 523 | ip.events.register('pre_run_cell', auto_reload.pre_run_cell) |
|
525 | 524 | ip.events.register('post_execute', auto_reload.post_execute_hook) |
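The %autoreload machinery shown above is driven entirely through the two magics it registers. A minimal, illustrative IPython session (not part of the changeset; 'mypkg' is a hypothetical module name) would look like:

    %load_ext autoreload
    %autoreload 1            # reload only modules imported with %aimport
    %aimport mypkg           # import 'mypkg' and mark it for autoreloading
    mypkg.do_work()          # edits to mypkg.py are picked up before the next cell runs

With %autoreload 2, every imported module except those excluded via '%aimport -name' is checked for changes before each cell executes.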
@@ -1,114 +1,114 b'' | |||
|
1 | 1 | """ |
|
2 | 2 | Password generation for the IPython notebook. |
|
3 | 3 | """ |
|
4 | 4 | #----------------------------------------------------------------------------- |
|
5 | 5 | # Imports |
|
6 | 6 | #----------------------------------------------------------------------------- |
|
7 | 7 | # Stdlib |
|
8 | 8 | import getpass |
|
9 | 9 | import hashlib |
|
10 | 10 | import random |
|
11 | 11 | |
|
12 | 12 | # Our own |
|
13 | 13 | from IPython.core.error import UsageError |
|
14 | from IPython.utils.py3compat import | 

14 | from IPython.utils.py3compat import encode | 
|
15 | 15 | |
|
16 | 16 | #----------------------------------------------------------------------------- |
|
17 | 17 | # Globals |
|
18 | 18 | #----------------------------------------------------------------------------- |
|
19 | 19 | |
|
20 | 20 | # Length of the salt in nr of hex chars, which implies salt_len * 4 |
|
21 | 21 | # bits of randomness. |
|
22 | 22 | salt_len = 12 |
|
23 | 23 | |
|
24 | 24 | #----------------------------------------------------------------------------- |
|
25 | 25 | # Functions |
|
26 | 26 | #----------------------------------------------------------------------------- |
|
27 | 27 | |
|
28 | 28 | def passwd(passphrase=None, algorithm='sha1'): |
|
29 | 29 | """Generate hashed password and salt for use in notebook configuration. |
|
30 | 30 | |
|
31 | 31 | In the notebook configuration, set `c.NotebookApp.password` to |
|
32 | 32 | the generated string. |
|
33 | 33 | |
|
34 | 34 | Parameters |
|
35 | 35 | ---------- |
|
36 | 36 | passphrase : str |
|
37 | 37 | Password to hash. If unspecified, the user is asked to input |
|
38 | 38 | and verify a password. |
|
39 | 39 | algorithm : str |
|
40 | 40 | Hashing algorithm to use (e.g, 'sha1' or any argument supported |
|
41 | 41 | by :func:`hashlib.new`). |
|
42 | 42 | |
|
43 | 43 | Returns |
|
44 | 44 | ------- |
|
45 | 45 | hashed_passphrase : str |
|
46 | 46 | Hashed password, in the format 'hash_algorithm:salt:passphrase_hash'. |
|
47 | 47 | |
|
48 | 48 | Examples |
|
49 | 49 | -------- |
|
50 | 50 | >>> passwd('mypassword') |
|
51 | 51 | 'sha1:7cf3:b7d6da294ea9592a9480c8f52e63cd42cfb9dd12' |
|
52 | 52 | |
|
53 | 53 | """ |
|
54 | 54 | if passphrase is None: |
|
55 | 55 | for i in range(3): |
|
56 | 56 | p0 = getpass.getpass('Enter password: ') |
|
57 | 57 | p1 = getpass.getpass('Verify password: ') |
|
58 | 58 | if p0 == p1: |
|
59 | 59 | passphrase = p0 |
|
60 | 60 | break |
|
61 | 61 | else: |
|
62 | 62 | print('Passwords do not match.') |
|
63 | 63 | else: |
|
64 | 64 | raise UsageError('No matching passwords found. Giving up.') |
|
65 | 65 | |
|
66 | 66 | h = hashlib.new(algorithm) |
|
67 | 67 | salt = ('%0' + str(salt_len) + 'x') % random.getrandbits(4 * salt_len) |
|
68 | h.update( | 

68 | h.update(encode(passphrase, 'utf-8') + encode(salt, 'ascii')) | 
|
69 | 69 | |
|
70 | 70 | return ':'.join((algorithm, salt, h.hexdigest())) |
|
71 | 71 | |
|
72 | 72 | |
|
73 | 73 | def passwd_check(hashed_passphrase, passphrase): |
|
74 | 74 | """Verify that a given passphrase matches its hashed version. |
|
75 | 75 | |
|
76 | 76 | Parameters |
|
77 | 77 | ---------- |
|
78 | 78 | hashed_passphrase : str |
|
79 | 79 | Hashed password, in the format returned by `passwd`. |
|
80 | 80 | passphrase : str |
|
81 | 81 | Passphrase to validate. |
|
82 | 82 | |
|
83 | 83 | Returns |
|
84 | 84 | ------- |
|
85 | 85 | valid : bool |
|
86 | 86 | True if the passphrase matches the hash. |
|
87 | 87 | |
|
88 | 88 | Examples |
|
89 | 89 | -------- |
|
90 | 90 | >>> from IPython.lib.security import passwd_check |
|
91 | 91 | >>> passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a', |
|
92 | 92 | ... 'mypassword') |
|
93 | 93 | True |
|
94 | 94 | |
|
95 | 95 | >>> passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a', |
|
96 | 96 | ... 'anotherpassword') |
|
97 | 97 | False |
|
98 | 98 | """ |
|
99 | 99 | try: |
|
100 | 100 | algorithm, salt, pw_digest = hashed_passphrase.split(':', 2) |
|
101 | 101 | except (ValueError, TypeError): |
|
102 | 102 | return False |
|
103 | 103 | |
|
104 | 104 | try: |
|
105 | 105 | h = hashlib.new(algorithm) |
|
106 | 106 | except ValueError: |
|
107 | 107 | return False |
|
108 | 108 | |
|
109 | 109 | if len(pw_digest) == 0: |
|
110 | 110 | return False |
|
111 | 111 | |
|
112 | h.update( | 

112 | h.update(encode(passphrase, 'utf-8') + encode(salt, 'ascii')) | 
|
113 | 113 | |
|
114 | 114 | return h.hexdigest() == pw_digest |
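A short round trip through the two functions above, shown only to illustrate the API; the actual digest depends on the random salt generated at call time:

    from IPython.lib.security import passwd, passwd_check

    hashed = passwd('mypassword')               # e.g. 'sha1:<salt>:<digest>'
    passwd_check(hashed, 'mypassword')          # True
    passwd_check(hashed, 'anotherpassword')     # False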
@@ -1,540 +1,540 b'' | |||
|
1 | 1 | """IPython terminal interface using prompt_toolkit""" |
|
2 | 2 | |
|
3 | 3 | import os |
|
4 | 4 | import sys |
|
5 | 5 | import warnings |
|
6 | 6 | from warnings import warn |
|
7 | 7 | |
|
8 | 8 | from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC |
|
9 | 9 | from IPython.utils import io |
|
10 | from IPython.utils.py3compat import input | 

10 | from IPython.utils.py3compat import input | 
|
11 | 11 | from IPython.utils.terminal import toggle_set_term_title, set_term_title |
|
12 | 12 | from IPython.utils.process import abbrev_cwd |
|
13 | 13 | from traitlets import ( |
|
14 | 14 | Bool, Unicode, Dict, Integer, observe, Instance, Type, default, Enum, Union, |
|
15 | 15 | Any, |
|
16 | 16 | ) |
|
17 | 17 | |
|
18 | 18 | from prompt_toolkit.document import Document |
|
19 | 19 | from prompt_toolkit.enums import DEFAULT_BUFFER, EditingMode |
|
20 | 20 | from prompt_toolkit.filters import (HasFocus, Condition, IsDone) |
|
21 | 21 | from prompt_toolkit.history import InMemoryHistory |
|
22 | 22 | from prompt_toolkit.shortcuts import create_prompt_application, create_eventloop, create_prompt_layout, create_output |
|
23 | 23 | from prompt_toolkit.interface import CommandLineInterface |
|
24 | 24 | from prompt_toolkit.key_binding.manager import KeyBindingManager |
|
25 | 25 | from prompt_toolkit.layout.processors import ConditionalProcessor, HighlightMatchingBracketProcessor |
|
26 | 26 | from prompt_toolkit.styles import PygmentsStyle, DynamicStyle |
|
27 | 27 | |
|
28 | 28 | from pygments.styles import get_style_by_name |
|
29 | 29 | from pygments.style import Style |
|
30 | 30 | from pygments.token import Token |
|
31 | 31 | |
|
32 | 32 | from .debugger import TerminalPdb, Pdb |
|
33 | 33 | from .magics import TerminalMagics |
|
34 | 34 | from .pt_inputhooks import get_inputhook_name_and_func |
|
35 | 35 | from .prompts import Prompts, ClassicPrompts, RichPromptDisplayHook |
|
36 | 36 | from .ptutils import IPythonPTCompleter, IPythonPTLexer |
|
37 | 37 | from .shortcuts import register_ipython_shortcuts |
|
38 | 38 | |
|
39 | 39 | DISPLAY_BANNER_DEPRECATED = object() |
|
40 | 40 | |
|
41 | 41 | |
|
42 | 42 | class _NoStyle(Style): pass |
|
43 | 43 | |
|
44 | 44 | |
|
45 | 45 | |
|
46 | 46 | _style_overrides_light_bg = { |
|
47 | 47 | Token.Prompt: '#0000ff', |
|
48 | 48 | Token.PromptNum: '#0000ee bold', |
|
49 | 49 | Token.OutPrompt: '#cc0000', |
|
50 | 50 | Token.OutPromptNum: '#bb0000 bold', |
|
51 | 51 | } |
|
52 | 52 | |
|
53 | 53 | _style_overrides_linux = { |
|
54 | 54 | Token.Prompt: '#00cc00', |
|
55 | 55 | Token.PromptNum: '#00bb00 bold', |
|
56 | 56 | Token.OutPrompt: '#cc0000', |
|
57 | 57 | Token.OutPromptNum: '#bb0000 bold', |
|
58 | 58 | } |
|
59 | 59 | |
|
60 | 60 | |
|
61 | 61 | |
|
62 | 62 | def get_default_editor(): |
|
63 | 63 | try: |
|
64 | 64 | return os.environ['EDITOR'] |
|
65 | 65 | except KeyError: |
|
66 | 66 | pass |
|
67 | 67 | except UnicodeError: |
|
68 | 68 | warn("$EDITOR environment variable is not pure ASCII. Using platform " |
|
69 | 69 | "default editor.") |
|
70 | 70 | |
|
71 | 71 | if os.name == 'posix': |
|
72 | 72 | return 'vi' # the only one guaranteed to be there! |
|
73 | 73 | else: |
|
74 | 74 | return 'notepad' # same in Windows! |
|
75 | 75 | |
|
76 | 76 | # conservatively check for tty |
|
77 | 77 | # overridden streams can result in things like: |
|
78 | 78 | # - sys.stdin = None |
|
79 | 79 | # - no isatty method |
|
80 | 80 | for _name in ('stdin', 'stdout', 'stderr'): |
|
81 | 81 | _stream = getattr(sys, _name) |
|
82 | 82 | if not _stream or not hasattr(_stream, 'isatty') or not _stream.isatty(): |
|
83 | 83 | _is_tty = False |
|
84 | 84 | break |
|
85 | 85 | else: |
|
86 | 86 | _is_tty = True |
|
87 | 87 | |
|
88 | 88 | |
|
89 | 89 | _use_simple_prompt = ('IPY_TEST_SIMPLE_PROMPT' in os.environ) or (not _is_tty) |
|
90 | 90 | |
|
91 | 91 | class TerminalInteractiveShell(InteractiveShell): |
|
92 | 92 | space_for_menu = Integer(6, help='Number of lines at the bottom of the screen '
|
93 | 93 | 'to reserve for the completion menu' |
|
94 | 94 | ).tag(config=True) |
|
95 | 95 | |
|
96 | 96 | def _space_for_menu_changed(self, old, new): |
|
97 | 97 | self._update_layout() |
|
98 | 98 | |
|
99 | 99 | pt_cli = None |
|
100 | 100 | debugger_history = None |
|
101 | 101 | _pt_app = None |
|
102 | 102 | |
|
103 | 103 | simple_prompt = Bool(_use_simple_prompt, |
|
104 | 104 | help="""Use `raw_input` for the REPL, without completion and prompt colors. |
|
105 | 105 | |
|
106 | 106 | Useful when controlling IPython as a subprocess and piping STDIN/OUT/ERR. Known uses are:

107 | 107 | IPython's own testing machinery, and emacs inferior-shell integration through elpy.

108 | 108 | 

109 | 109 | This mode defaults to `True` if the `IPY_TEST_SIMPLE_PROMPT`
|
110 | 110 | environment variable is set, or the current terminal is not a tty.""" |
|
111 | 111 | ).tag(config=True) |
|
112 | 112 | |
|
113 | 113 | @property |
|
114 | 114 | def debugger_cls(self): |
|
115 | 115 | return Pdb if self.simple_prompt else TerminalPdb |
|
116 | 116 | |
|
117 | 117 | confirm_exit = Bool(True, |
|
118 | 118 | help=""" |
|
119 | 119 | Set to confirm when you try to exit IPython with an EOF (Control-D |
|
120 | 120 | in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit', |
|
121 | 121 | you can force a direct exit without any confirmation.""", |
|
122 | 122 | ).tag(config=True) |
|
123 | 123 | |
|
124 | 124 | editing_mode = Unicode('emacs', |
|
125 | 125 | help="Shortcut style to use at the prompt. 'vi' or 'emacs'.", |
|
126 | 126 | ).tag(config=True) |
|
127 | 127 | |
|
128 | 128 | mouse_support = Bool(False, |
|
129 | 129 | help="Enable mouse support in the prompt\n(Note: prevents selecting text with the mouse)" |
|
130 | 130 | ).tag(config=True) |
|
131 | 131 | |
|
132 | 132 | # We don't load the list of styles for the help string, because loading |
|
133 | 133 | # Pygments plugins takes time and can cause unexpected errors. |
|
134 | 134 | highlighting_style = Union([Unicode('legacy'), Type(klass=Style)], |
|
135 | 135 | help="""The name or class of a Pygments style to use for syntax |
|
136 | 136 | highlighting. To see available styles, run `pygmentize -L styles`.""" |
|
137 | 137 | ).tag(config=True) |
|
138 | 138 | |
|
139 | 139 | |
|
140 | 140 | @observe('highlighting_style') |
|
141 | 141 | @observe('colors') |
|
142 | 142 | def _highlighting_style_changed(self, change): |
|
143 | 143 | self.refresh_style() |
|
144 | 144 | |
|
145 | 145 | def refresh_style(self): |
|
146 | 146 | self._style = self._make_style_from_name_or_cls(self.highlighting_style) |
|
147 | 147 | |
|
148 | 148 | |
|
149 | 149 | highlighting_style_overrides = Dict( |
|
150 | 150 | help="Override highlighting format for specific tokens" |
|
151 | 151 | ).tag(config=True) |
|
152 | 152 | |
|
153 | 153 | true_color = Bool(False, |
|
154 | 154 | help=("Use 24bit colors instead of 256 colors in prompt highlighting. " |
|
155 | 155 | "If your terminal supports true color, the following command " |
|
156 | 156 | "should print 'TRUECOLOR' in orange: " |
|
157 | 157 | "printf \"\\x1b[38;2;255;100;0mTRUECOLOR\\x1b[0m\\n\"") |
|
158 | 158 | ).tag(config=True) |
|
159 | 159 | |
|
160 | 160 | editor = Unicode(get_default_editor(), |
|
161 | 161 | help="Set the editor used by IPython (default to $EDITOR/vi/notepad)." |
|
162 | 162 | ).tag(config=True) |
|
163 | 163 | |
|
164 | 164 | prompts_class = Type(Prompts, help='Class used to generate Prompt token for prompt_toolkit').tag(config=True) |
|
165 | 165 | |
|
166 | 166 | prompts = Instance(Prompts) |
|
167 | 167 | |
|
168 | 168 | @default('prompts') |
|
169 | 169 | def _prompts_default(self): |
|
170 | 170 | return self.prompts_class(self) |
|
171 | 171 | |
|
172 | 172 | @observe('prompts') |
|
173 | 173 | def _(self, change): |
|
174 | 174 | self._update_layout() |
|
175 | 175 | |
|
176 | 176 | @default('displayhook_class') |
|
177 | 177 | def _displayhook_class_default(self): |
|
178 | 178 | return RichPromptDisplayHook |
|
179 | 179 | |
|
180 | 180 | term_title = Bool(True, |
|
181 | 181 | help="Automatically set the terminal title" |
|
182 | 182 | ).tag(config=True) |
|
183 | 183 | |
|
184 | 184 | term_title_format = Unicode("IPython: {cwd}", |
|
185 | 185 | help="Customize the terminal title format. This is a python format string. " + |
|
186 | 186 | "Available substitutions are: {cwd}." |
|
187 | 187 | ).tag(config=True) |
|
188 | 188 | |
|
189 | 189 | display_completions = Enum(('column', 'multicolumn','readlinelike'), |
|
190 | 190 | help= ( "Options for displaying tab completions, 'column', 'multicolumn', and " |
|
191 | 191 | "'readlinelike'. These options are for `prompt_toolkit`, see " |
|
192 | 192 | "`prompt_toolkit` documentation for more information." |
|
193 | 193 | ), |
|
194 | 194 | default_value='multicolumn').tag(config=True) |
|
195 | 195 | |
|
196 | 196 | highlight_matching_brackets = Bool(True, |
|
197 | 197 | help="Highlight matching brackets.", |
|
198 | 198 | ).tag(config=True) |
|
199 | 199 | |
|
200 | 200 | extra_open_editor_shortcuts = Bool(False, |
|
201 | 201 | help="Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. " |
|
202 | 202 | "This is in addition to the F2 binding, which is always enabled." |
|
203 | 203 | ).tag(config=True) |
|
204 | 204 | |
|
205 | 205 | handle_return = Any(None, |
|
206 | 206 | help="Provide an alternative handler to be called when the user presses " |
|
207 | 207 | "Return. This is an advanced option intended for debugging, which " |
|
208 | 208 | "may be changed or removed in later releases." |
|
209 | 209 | ).tag(config=True) |
|
210 | 210 | |
|
211 | 211 | @observe('term_title') |
|
212 | 212 | def init_term_title(self, change=None): |
|
213 | 213 | # Enable or disable the terminal title. |
|
214 | 214 | if self.term_title: |
|
215 | 215 | toggle_set_term_title(True) |
|
216 | 216 | set_term_title(self.term_title_format.format(cwd=abbrev_cwd())) |
|
217 | 217 | else: |
|
218 | 218 | toggle_set_term_title(False) |
|
219 | 219 | |
|
220 | 220 | def init_display_formatter(self): |
|
221 | 221 | super(TerminalInteractiveShell, self).init_display_formatter() |
|
222 | 222 | # terminal only supports plain text |
|
223 | 223 | self.display_formatter.active_types = ['text/plain'] |
|
224 | 224 | # disable `_ipython_display_` |
|
225 | 225 | self.display_formatter.ipython_display_formatter.enabled = False |
|
226 | 226 | |
|
227 | 227 | def init_prompt_toolkit_cli(self): |
|
228 | 228 | if self.simple_prompt: |
|
229 | 229 | # Fall back to plain non-interactive output for tests. |
|
230 | 230 | # This is very limited, and only accepts a single line. |
|
231 | 231 | def prompt(): |
|
232 | 232 | isp = self.input_splitter |
|
233 | 233 | prompt_text = "".join(x[1] for x in self.prompts.in_prompt_tokens()) |
|
234 | 234 | prompt_continuation = "".join(x[1] for x in self.prompts.continuation_prompt_tokens()) |
|
235 | 235 | while isp.push_accepts_more(): |
|
236 | line = | 

236 | line = input(prompt_text) | 
|
237 | 237 | isp.push(line) |
|
238 | 238 | prompt_text = prompt_continuation |
|
239 | 239 | return isp.source_reset() |
|
240 | 240 | self.prompt_for_code = prompt |
|
241 | 241 | return |
|
242 | 242 | |
|
243 | 243 | # Set up keyboard shortcuts |
|
244 | 244 | kbmanager = KeyBindingManager.for_prompt( |
|
245 | 245 | enable_open_in_editor=self.extra_open_editor_shortcuts, |
|
246 | 246 | ) |
|
247 | 247 | register_ipython_shortcuts(kbmanager.registry, self) |
|
248 | 248 | |
|
249 | 249 | # Pre-populate history from IPython's history database |
|
250 | 250 | history = InMemoryHistory() |
|
251 | 251 | last_cell = u"" |
|
252 | 252 | for __, ___, cell in self.history_manager.get_tail(self.history_load_length, |
|
253 | 253 | include_latest=True): |
|
254 | 254 | # Ignore blank lines and consecutive duplicates |
|
255 | 255 | cell = cell.rstrip() |
|
256 | 256 | if cell and (cell != last_cell): |
|
257 | 257 | history.append(cell) |
|
258 | 258 | last_cell = cell |
|
259 | 259 | |
|
260 | 260 | self._style = self._make_style_from_name_or_cls(self.highlighting_style) |
|
261 | 261 | self.style = DynamicStyle(lambda: self._style) |
|
262 | 262 | |
|
263 | 263 | editing_mode = getattr(EditingMode, self.editing_mode.upper()) |
|
264 | 264 | |
|
265 | 265 | def patch_stdout(**kwargs): |
|
266 | 266 | return self.pt_cli.patch_stdout_context(**kwargs) |
|
267 | 267 | |
|
268 | 268 | self._pt_app = create_prompt_application( |
|
269 | 269 | editing_mode=editing_mode, |
|
270 | 270 | key_bindings_registry=kbmanager.registry, |
|
271 | 271 | history=history, |
|
272 | 272 | completer=IPythonPTCompleter(shell=self, |
|
273 | 273 | patch_stdout=patch_stdout), |
|
274 | 274 | enable_history_search=True, |
|
275 | 275 | style=self.style, |
|
276 | 276 | mouse_support=self.mouse_support, |
|
277 | 277 | **self._layout_options() |
|
278 | 278 | ) |
|
279 | 279 | self._eventloop = create_eventloop(self.inputhook) |
|
280 | 280 | self.pt_cli = CommandLineInterface( |
|
281 | 281 | self._pt_app, eventloop=self._eventloop, |
|
282 | 282 | output=create_output(true_color=self.true_color)) |
|
283 | 283 | |
|
284 | 284 | def _make_style_from_name_or_cls(self, name_or_cls): |
|
285 | 285 | """ |
|
286 | 286 | Small wrapper that makes an IPython-compatible style from a style name.

287 | 287 | 

288 | 288 | We need this to add styles for the prompt, etc.
|
289 | 289 | """ |
|
290 | 290 | style_overrides = {} |
|
291 | 291 | if name_or_cls == 'legacy': |
|
292 | 292 | legacy = self.colors.lower() |
|
293 | 293 | if legacy == 'linux': |
|
294 | 294 | style_cls = get_style_by_name('monokai') |
|
295 | 295 | style_overrides = _style_overrides_linux |
|
296 | 296 | elif legacy == 'lightbg': |
|
297 | 297 | style_overrides = _style_overrides_light_bg |
|
298 | 298 | style_cls = get_style_by_name('pastie') |
|
299 | 299 | elif legacy == 'neutral': |
|
300 | 300 | # The default theme needs to be visible on both a dark background |
|
301 | 301 | # and a light background, because we can't tell what the terminal |
|
302 | 302 | # looks like. These tweaks to the default theme help with that. |
|
303 | 303 | style_cls = get_style_by_name('default') |
|
304 | 304 | style_overrides.update({ |
|
305 | 305 | Token.Number: '#007700', |
|
306 | 306 | Token.Operator: 'noinherit', |
|
307 | 307 | Token.String: '#BB6622', |
|
308 | 308 | Token.Name.Function: '#2080D0', |
|
309 | 309 | Token.Name.Class: 'bold #2080D0', |
|
310 | 310 | Token.Name.Namespace: 'bold #2080D0', |
|
311 | 311 | Token.Prompt: '#009900', |
|
312 | 312 | Token.PromptNum: '#00ff00 bold', |
|
313 | 313 | Token.OutPrompt: '#990000', |
|
314 | 314 | Token.OutPromptNum: '#ff0000 bold', |
|
315 | 315 | }) |
|
316 | 316 | |
|
317 | 317 | # Hack: Due to limited color support on the Windows console |
|
318 | 318 | # the prompt colors will be wrong without this |
|
319 | 319 | if os.name == 'nt': |
|
320 | 320 | style_overrides.update({ |
|
321 | 321 | Token.Prompt: '#ansidarkgreen', |
|
322 | 322 | Token.PromptNum: '#ansigreen bold', |
|
323 | 323 | Token.OutPrompt: '#ansidarkred', |
|
324 | 324 | Token.OutPromptNum: '#ansired bold', |
|
325 | 325 | }) |
|
326 | 326 | elif legacy =='nocolor': |
|
327 | 327 | style_cls=_NoStyle |
|
328 | 328 | style_overrides = {} |
|
329 | 329 | else : |
|
330 | 330 | raise ValueError('Got unknown colors: ', legacy) |
|
331 | 331 | else : |
|
332 | 332 | if isinstance(name_or_cls, str): |
|
333 | 333 | style_cls = get_style_by_name(name_or_cls) |
|
334 | 334 | else: |
|
335 | 335 | style_cls = name_or_cls |
|
336 | 336 | style_overrides = { |
|
337 | 337 | Token.Prompt: '#009900', |
|
338 | 338 | Token.PromptNum: '#00ff00 bold', |
|
339 | 339 | Token.OutPrompt: '#990000', |
|
340 | 340 | Token.OutPromptNum: '#ff0000 bold', |
|
341 | 341 | } |
|
342 | 342 | style_overrides.update(self.highlighting_style_overrides) |
|
343 | 343 | style = PygmentsStyle.from_defaults(pygments_style_cls=style_cls, |
|
344 | 344 | style_dict=style_overrides) |
|
345 | 345 | |
|
346 | 346 | return style |
|
347 | 347 | |
|
348 | 348 | def _layout_options(self): |
|
349 | 349 | """ |
|
350 | 350 | Return the current layout options for this TerminalInteractiveShell.
|
351 | 351 | """ |
|
352 | 352 | return { |
|
353 | 353 | 'lexer':IPythonPTLexer(), |
|
354 | 354 | 'reserve_space_for_menu':self.space_for_menu, |
|
355 | 355 | 'get_prompt_tokens':self.prompts.in_prompt_tokens, |
|
356 | 356 | 'get_continuation_tokens':self.prompts.continuation_prompt_tokens, |
|
357 | 357 | 'multiline':True, |
|
358 | 358 | 'display_completions_in_columns': (self.display_completions == 'multicolumn'), |
|
359 | 359 | |
|
360 | 360 | # Highlight matching brackets, but only when this setting is |
|
361 | 361 | # enabled, and only when the DEFAULT_BUFFER has the focus. |
|
362 | 362 | 'extra_input_processors': [ConditionalProcessor( |
|
363 | 363 | processor=HighlightMatchingBracketProcessor(chars='[](){}'), |
|
364 | 364 | filter=HasFocus(DEFAULT_BUFFER) & ~IsDone() & |
|
365 | 365 | Condition(lambda cli: self.highlight_matching_brackets))], |
|
366 | 366 | } |
|
367 | 367 | |
|
368 | 368 | def _update_layout(self): |
|
369 | 369 | """ |
|
370 | 370 | Ask for a recomputation of the application layout if, for example,
|
371 | 371 | some configuration options have changed. |
|
372 | 372 | """ |
|
373 | 373 | if self._pt_app: |
|
374 | 374 | self._pt_app.layout = create_prompt_layout(**self._layout_options()) |
|
375 | 375 | |
|
376 | 376 | def prompt_for_code(self): |
|
377 | 377 | with self.pt_cli.patch_stdout_context(raw=True): |
|
378 | 378 | document = self.pt_cli.run( |
|
379 | 379 | pre_run=self.pre_prompt, reset_current_buffer=True) |
|
380 | 380 | return document.text |
|
381 | 381 | |
|
382 | 382 | def enable_win_unicode_console(self): |
|
383 | 383 | if sys.version_info >= (3, 6): |
|
384 | 384 | # Since PEP 528, Python uses the unicode APIs for the Windows |
|
385 | 385 | # console by default, so WUC shouldn't be needed. |
|
386 | 386 | return |
|
387 | 387 | |
|
388 | 388 | import win_unicode_console |
|
389 | 389 | win_unicode_console.enable() |
|
390 | 390 | |
|
391 | 391 | def init_io(self): |
|
392 | 392 | if sys.platform not in {'win32', 'cli'}: |
|
393 | 393 | return |
|
394 | 394 | |
|
395 | 395 | self.enable_win_unicode_console() |
|
396 | 396 | |
|
397 | 397 | import colorama |
|
398 | 398 | colorama.init() |
|
399 | 399 | |
|
400 | 400 | # For some reason we make these wrappers around stdout/stderr. |
|
401 | 401 | # For now, we need to reset them so all output gets coloured. |
|
402 | 402 | # https://github.com/ipython/ipython/issues/8669 |
|
403 | 403 | # io.std* are deprecated, but don't show our own deprecation warnings |
|
404 | 404 | # during initialization of the deprecated API. |
|
405 | 405 | with warnings.catch_warnings(): |
|
406 | 406 | warnings.simplefilter('ignore', DeprecationWarning) |
|
407 | 407 | io.stdout = io.IOStream(sys.stdout) |
|
408 | 408 | io.stderr = io.IOStream(sys.stderr) |
|
409 | 409 | |
|
410 | 410 | def init_magics(self): |
|
411 | 411 | super(TerminalInteractiveShell, self).init_magics() |
|
412 | 412 | self.register_magics(TerminalMagics) |
|
413 | 413 | |
|
414 | 414 | def init_alias(self): |
|
415 | 415 | # The parent class defines aliases that can be safely used with any |
|
416 | 416 | # frontend. |
|
417 | 417 | super(TerminalInteractiveShell, self).init_alias() |
|
418 | 418 | |
|
419 | 419 | # Now define aliases that only make sense on the terminal, because they |
|
420 | 420 | # need direct access to the console in a way that we can't emulate in |
|
421 | 421 | # GUI or web frontend |
|
422 | 422 | if os.name == 'posix': |
|
423 | 423 | for cmd in ['clear', 'more', 'less', 'man']: |
|
424 | 424 | self.alias_manager.soft_define_alias(cmd, cmd) |
|
425 | 425 | |
|
426 | 426 | |
|
427 | 427 | def __init__(self, *args, **kwargs): |
|
428 | 428 | super(TerminalInteractiveShell, self).__init__(*args, **kwargs) |
|
429 | 429 | self.init_prompt_toolkit_cli() |
|
430 | 430 | self.init_term_title() |
|
431 | 431 | self.keep_running = True |
|
432 | 432 | |
|
433 | 433 | self.debugger_history = InMemoryHistory() |
|
434 | 434 | |
|
435 | 435 | def ask_exit(self): |
|
436 | 436 | self.keep_running = False |
|
437 | 437 | |
|
438 | 438 | rl_next_input = None |
|
439 | 439 | |
|
440 | 440 | def pre_prompt(self): |
|
441 | 441 | if self.rl_next_input: |
|
442 | 442 | # We can't set the buffer here, because it will be reset just after |
|
443 | 443 | # this. Adding a callable to pre_run_callables does what we need |
|
444 | 444 | # after the buffer is reset. |
|
445 | 445 | s = self.rl_next_input |
|
446 | 446 | def set_doc(): |
|
447 | 447 | self.pt_cli.application.buffer.document = Document(s) |
|
448 | 448 | if hasattr(self.pt_cli, 'pre_run_callables'): |
|
449 | 449 | self.pt_cli.pre_run_callables.append(set_doc) |
|
450 | 450 | else: |
|
451 | 451 | # Older version of prompt_toolkit; it's OK to set the document |
|
452 | 452 | # directly here. |
|
453 | 453 | set_doc() |
|
454 | 454 | self.rl_next_input = None |
|
455 | 455 | |
|
456 | 456 | def interact(self, display_banner=DISPLAY_BANNER_DEPRECATED): |
|
457 | 457 | |
|
458 | 458 | if display_banner is not DISPLAY_BANNER_DEPRECATED: |
|
459 | 459 | warn('interact `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.', DeprecationWarning, stacklevel=2) |
|
460 | 460 | |
|
461 | 461 | self.keep_running = True |
|
462 | 462 | while self.keep_running: |
|
463 | 463 | print(self.separate_in, end='') |
|
464 | 464 | |
|
465 | 465 | try: |
|
466 | 466 | code = self.prompt_for_code() |
|
467 | 467 | except EOFError: |
|
468 | 468 | if (not self.confirm_exit) \ |
|
469 | 469 | or self.ask_yes_no('Do you really want to exit ([y]/n)?','y','n'): |
|
470 | 470 | self.ask_exit() |
|
471 | 471 | |
|
472 | 472 | else: |
|
473 | 473 | if code: |
|
474 | 474 | self.run_cell(code, store_history=True) |
|
475 | 475 | |
|
476 | 476 | def mainloop(self, display_banner=DISPLAY_BANNER_DEPRECATED): |
|
477 | 477 | # An extra layer of protection in case someone mashing Ctrl-C breaks |
|
478 | 478 | # out of our internal code. |
|
479 | 479 | if display_banner is not DISPLAY_BANNER_DEPRECATED: |
|
480 | 480 | warn('mainloop `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.', DeprecationWarning, stacklevel=2) |
|
481 | 481 | while True: |
|
482 | 482 | try: |
|
483 | 483 | self.interact() |
|
484 | 484 | break |
|
485 | 485 | except KeyboardInterrupt as e: |
|
486 | 486 | print("\n%s escaped interact()\n" % type(e).__name__) |
|
487 | 487 | finally: |
|
488 | 488 | # An interrupt during the eventloop will mess up the |
|
489 | 489 | # internal state of the prompt_toolkit library. |
|
490 | 490 | # Stopping the eventloop fixes this, see |
|
491 | 491 | # https://github.com/ipython/ipython/pull/9867 |
|
492 | 492 | if hasattr(self, '_eventloop'): |
|
493 | 493 | self._eventloop.stop() |
|
494 | 494 | |
|
495 | 495 | _inputhook = None |
|
496 | 496 | def inputhook(self, context): |
|
497 | 497 | if self._inputhook is not None: |
|
498 | 498 | self._inputhook(context) |
|
499 | 499 | |
|
500 | 500 | active_eventloop = None |
|
501 | 501 | def enable_gui(self, gui=None): |
|
502 | 502 | if gui: |
|
503 | 503 | self.active_eventloop, self._inputhook =\ |
|
504 | 504 | get_inputhook_name_and_func(gui) |
|
505 | 505 | else: |
|
506 | 506 | self.active_eventloop = self._inputhook = None |
|
507 | 507 | |
|
508 | 508 | # Run !system commands directly, not through pipes, so terminal programs |
|
509 | 509 | # work correctly. |
|
510 | 510 | system = InteractiveShell.system_raw |
|
511 | 511 | |
|
512 | 512 | def auto_rewrite_input(self, cmd): |
|
513 | 513 | """Overridden from the parent class to use fancy rewriting prompt""" |
|
514 | 514 | if not self.show_rewritten_input: |
|
515 | 515 | return |
|
516 | 516 | |
|
517 | 517 | tokens = self.prompts.rewrite_prompt_tokens() |
|
518 | 518 | if self.pt_cli: |
|
519 | 519 | self.pt_cli.print_tokens(tokens) |
|
520 | 520 | print(cmd) |
|
521 | 521 | else: |
|
522 | 522 | prompt = ''.join(s for t, s in tokens) |
|
523 | 523 | print(prompt, cmd, sep='') |
|
524 | 524 | |
|
525 | 525 | _prompts_before = None |
|
526 | 526 | def switch_doctest_mode(self, mode): |
|
527 | 527 | """Switch prompts to classic for %doctest_mode""" |
|
528 | 528 | if mode: |
|
529 | 529 | self._prompts_before = self.prompts |
|
530 | 530 | self.prompts = ClassicPrompts(self) |
|
531 | 531 | elif self._prompts_before: |
|
532 | 532 | self.prompts = self._prompts_before |
|
533 | 533 | self._prompts_before = None |
|
534 | 534 | self._update_layout() |
|
535 | 535 | |
|
536 | 536 | |
|
537 | 537 | InteractiveShellABC.register(TerminalInteractiveShell) |
|
538 | 538 | |
|
539 | 539 | if __name__ == '__main__': |
|
540 | 540 | TerminalInteractiveShell.instance().interact() |
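The traits tagged with config=True in TerminalInteractiveShell above are normally set from a profile's ipython_config.py. An illustrative sketch using trait names taken from the class; the values are arbitrary examples, not recommendations:

    c = get_config()
    c.TerminalInteractiveShell.editing_mode = 'vi'
    c.TerminalInteractiveShell.highlighting_style = 'monokai'
    c.TerminalInteractiveShell.display_completions = 'readlinelike'
    c.TerminalInteractiveShell.confirm_exit = False
    c.TerminalInteractiveShell.term_title_format = 'IPython: {cwd}'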
@@ -1,378 +1,376 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """Decorators for labeling test objects. |
|
3 | 3 | |
|
4 | 4 | Decorators that merely return a modified version of the original function |
|
5 | 5 | object are straightforward. Decorators that return a new function object need |
|
6 | 6 | to use nose.tools.make_decorator(original_function)(decorator) in returning the |
|
7 | 7 | decorator, in order to preserve metadata such as function name, setup and |
|
8 | 8 | teardown functions and so on - see nose.tools for more information. |
|
9 | 9 | |
|
10 | 10 | This module provides a set of useful decorators meant to be ready to use in |
|
11 | 11 | your own tests. See the bottom of the file for the ready-made ones, and if you |
|
12 | 12 | find yourself writing a new one that may be of generic use, add it here. |
|
13 | 13 | |
|
14 | 14 | Included decorators: |
|
15 | 15 | |
|
16 | 16 | |
|
17 | 17 | Lightweight testing that remains unittest-compatible. |
|
18 | 18 | |
|
19 | 19 | - An @as_unittest decorator can be used to tag any normal parameter-less |
|
20 | 20 | function as a unittest TestCase. Then, both nose and normal unittest will |
|
21 | 21 | recognize it as such. This will make it easier to migrate away from Nose if |
|
22 | 22 | we ever need/want to while maintaining very lightweight tests. |
|
23 | 23 | |
|
24 | 24 | NOTE: This file contains IPython-specific decorators. Using the machinery in |
|
25 | 25 | IPython.external.decorators, we import either numpy.testing.decorators if numpy is |
|
26 | 26 | available, OR use equivalent code in IPython.external._decorators, which |
|
27 | 27 | we've copied verbatim from numpy. |
|
28 | 28 | |
|
29 | 29 | """ |
|
30 | 30 | |
|
31 | 31 | # Copyright (c) IPython Development Team. |
|
32 | 32 | # Distributed under the terms of the Modified BSD License. |
|
33 | 33 | |
|
34 | import sys | |
|
35 | 34 | import os |
|
35 | import shutil | |
|
36 | import sys | |
|
36 | 37 | import tempfile |
|
37 | 38 | import unittest |
|
38 | 39 | import warnings |
|
39 | 40 | from importlib import import_module |
|
40 | 41 | |
|
41 | 42 | from decorator import decorator |
|
42 | 43 | |
|
43 | 44 | # Expose the unittest-driven decorators |
|
44 | 45 | from .ipunittest import ipdoctest, ipdocstring |
|
45 | 46 | |
|
46 | 47 | # Grab the numpy-specific decorators which we keep in a file that we |
|
47 | 48 | # occasionally update from upstream: decorators.py is a copy of |
|
48 | 49 | # numpy.testing.decorators, we expose all of it here. |
|
49 | 50 | from IPython.external.decorators import * |
|
50 | 51 | |
|
51 | # For onlyif_cmd_exists decorator | |
|
52 | from IPython.utils.py3compat import which | |
|
53 | ||
|
54 | 52 | #----------------------------------------------------------------------------- |
|
55 | 53 | # Classes and functions |
|
56 | 54 | #----------------------------------------------------------------------------- |
|
57 | 55 | |
|
58 | 56 | # Simple example of the basic idea |
|
59 | 57 | def as_unittest(func): |
|
60 | 58 | """Decorator to make a simple function into a normal test via unittest.""" |
|
61 | 59 | class Tester(unittest.TestCase): |
|
62 | 60 | def test(self): |
|
63 | 61 | func() |
|
64 | 62 | |
|
65 | 63 | Tester.__name__ = func.__name__ |
|
66 | 64 | |
|
67 | 65 | return Tester |
|
68 | 66 | |
|
69 | 67 | # Utility functions |
|
70 | 68 | |
|
71 | 69 | def apply_wrapper(wrapper, func): |
|
72 | 70 | """Apply a wrapper to a function for decoration. |
|
73 | 71 | |
|
74 | 72 | This mixes Michele Simionato's decorator tool with nose's make_decorator, |
|
75 | 73 | to apply a wrapper in a decorator so that all nose attributes, as well as |
|
76 | 74 | function signature and other properties, survive the decoration cleanly. |
|
77 | 75 | This will ensure that wrapped functions can still be well introspected via |
|
78 | 76 | IPython, for example. |
|
79 | 77 | """ |
|
80 | 78 | warnings.warn("The function `apply_wrapper` is deprecated since IPython 4.0", |
|
81 | 79 | DeprecationWarning, stacklevel=2) |
|
82 | 80 | import nose.tools |
|
83 | 81 | |
|
84 | 82 | return decorator(wrapper,nose.tools.make_decorator(func)(wrapper)) |
|
85 | 83 | |
|
86 | 84 | |
|
87 | 85 | def make_label_dec(label, ds=None): |
|
88 | 86 | """Factory function to create a decorator that applies one or more labels. |
|
89 | 87 | |
|
90 | 88 | Parameters |
|
91 | 89 | ---------- |
|
92 | 90 | label : string or sequence |
|
93 | 91 | One or more labels that will be applied by the decorator to the functions |
|
94 | 92 | it decorates. Labels are attributes of the decorated function with their |
|
95 | 93 | value set to True. |
|
96 | 94 | |
|
97 | 95 | ds : string |
|
98 | 96 | An optional docstring for the resulting decorator. If not given, a |
|
99 | 97 | default docstring is auto-generated. |
|
100 | 98 | |
|
101 | 99 | Returns |
|
102 | 100 | ------- |
|
103 | 101 | A decorator. |
|
104 | 102 | |
|
105 | 103 | Examples |
|
106 | 104 | -------- |
|
107 | 105 | |
|
108 | 106 | A simple labeling decorator: |
|
109 | 107 | |
|
110 | 108 | >>> slow = make_label_dec('slow') |
|
111 | 109 | >>> slow.__doc__ |
|
112 | 110 | "Labels a test as 'slow'." |
|
113 | 111 | |
|
114 | 112 | And one that uses multiple labels and a custom docstring: |
|
115 | 113 | |
|
116 | 114 | >>> rare = make_label_dec(['slow','hard'], |
|
117 | 115 | ... "Mix labels 'slow' and 'hard' for rare tests.") |
|
118 | 116 | >>> rare.__doc__ |
|
119 | 117 | "Mix labels 'slow' and 'hard' for rare tests." |
|
120 | 118 | |
|
121 | 119 | Now, let's test using this one: |
|
122 | 120 | >>> @rare |
|
123 | 121 | ... def f(): pass |
|
124 | 122 | ... |
|
125 | 123 | >>> |
|
126 | 124 | >>> f.slow |
|
127 | 125 | True |
|
128 | 126 | >>> f.hard |
|
129 | 127 | True |
|
130 | 128 | """ |
|
131 | 129 | |
|
132 | 130 | warnings.warn("The function `make_label_dec` is deprecated since IPython 4.0", |
|
133 | 131 | DeprecationWarning, stacklevel=2) |
|
134 | 132 | if isinstance(label, str): |
|
135 | 133 | labels = [label] |
|
136 | 134 | else: |
|
137 | 135 | labels = label |
|
138 | 136 | |
|
139 | 137 | # Validate that the given label(s) are OK for use in setattr() by doing a |
|
140 | 138 | # dry run on a dummy function. |
|
141 | 139 | tmp = lambda : None |
|
142 | 140 | for label in labels: |
|
143 | 141 | setattr(tmp,label,True) |
|
144 | 142 | |
|
145 | 143 | # This is the actual decorator we'll return |
|
146 | 144 | def decor(f): |
|
147 | 145 | for label in labels: |
|
148 | 146 | setattr(f,label,True) |
|
149 | 147 | return f |
|
150 | 148 | |
|
151 | 149 | # Apply the user's docstring, or autogenerate a basic one |
|
152 | 150 | if ds is None: |
|
153 | 151 | ds = "Labels a test as %r." % label |
|
154 | 152 | decor.__doc__ = ds |
|
155 | 153 | |
|
156 | 154 | return decor |
|
157 | 155 | |
|
158 | 156 | |
|
159 | 157 | # Inspired by numpy's skipif, but uses the full apply_wrapper utility to |
|
160 | 158 | # preserve function metadata better and allows the skip condition to be a |
|
161 | 159 | # callable. |
|
162 | 160 | def skipif(skip_condition, msg=None): |
|
163 | 161 | ''' Make function raise SkipTest exception if skip_condition is true |
|
164 | 162 | |
|
165 | 163 | Parameters |
|
166 | 164 | ---------- |
|
167 | 165 | |
|
168 | 166 | skip_condition : bool or callable |
|
169 | 167 | Flag to determine whether to skip test. If the condition is a |
|
170 | 168 | callable, it is used at runtime to dynamically make the decision. This |
|
171 | 169 | is useful for tests that may require costly imports, to delay the cost |
|
172 | 170 | until the test suite is actually executed. |
|
173 | 171 | msg : string |
|
174 | 172 | Message to give on raising a SkipTest exception. |
|
175 | 173 | |
|
176 | 174 | Returns |
|
177 | 175 | ------- |
|
178 | 176 | decorator : function |
|
179 | 177 | Decorator, which, when applied to a function, causes SkipTest |
|
180 | 178 | to be raised when the skip_condition was True, and the function |
|
181 | 179 | to be called normally otherwise. |
|
182 | 180 | |
|
183 | 181 | Notes |
|
184 | 182 | ----- |
|
185 | 183 | You will see from the code that we had to further decorate the |
|
186 | 184 | decorator with the nose.tools.make_decorator function in order to |
|
187 | 185 | transmit function name, and various other metadata. |
|
188 | 186 | ''' |
|
189 | 187 | |
|
190 | 188 | def skip_decorator(f): |
|
191 | 189 | # Local import to avoid a hard nose dependency and only incur the |
|
192 | 190 | # import time overhead at actual test-time. |
|
193 | 191 | import nose |
|
194 | 192 | |
|
195 | 193 | # Allow for both boolean or callable skip conditions. |
|
196 | 194 | if callable(skip_condition): |
|
197 | 195 | skip_val = skip_condition |
|
198 | 196 | else: |
|
199 | 197 | skip_val = lambda : skip_condition |
|
200 | 198 | |
|
201 | 199 | def get_msg(func,msg=None): |
|
202 | 200 | """Skip message with information about function being skipped.""" |
|
203 | 201 | if msg is None: out = 'Test skipped due to test condition.' |
|
204 | 202 | else: out = msg |
|
205 | 203 | return "Skipping test: %s. %s" % (func.__name__,out) |
|
206 | 204 | |
|
207 | 205 | # We need to define *two* skippers because Python doesn't allow both |
|
208 | 206 | # return with value and yield inside the same function. |
|
209 | 207 | def skipper_func(*args, **kwargs): |
|
210 | 208 | """Skipper for normal test functions.""" |
|
211 | 209 | if skip_val(): |
|
212 | 210 | raise nose.SkipTest(get_msg(f,msg)) |
|
213 | 211 | else: |
|
214 | 212 | return f(*args, **kwargs) |
|
215 | 213 | |
|
216 | 214 | def skipper_gen(*args, **kwargs): |
|
217 | 215 | """Skipper for test generators.""" |
|
218 | 216 | if skip_val(): |
|
219 | 217 | raise nose.SkipTest(get_msg(f,msg)) |
|
220 | 218 | else: |
|
221 | 219 | for x in f(*args, **kwargs): |
|
222 | 220 | yield x |
|
223 | 221 | |
|
224 | 222 | # Choose the right skipper to use when building the actual generator. |
|
225 | 223 | if nose.util.isgenerator(f): |
|
226 | 224 | skipper = skipper_gen |
|
227 | 225 | else: |
|
228 | 226 | skipper = skipper_func |
|
229 | 227 | |
|
230 | 228 | return nose.tools.make_decorator(f)(skipper) |
|
231 | 229 | |
|
232 | 230 | return skip_decorator |
|
233 | 231 | |
|
234 | 232 | # A version with the condition set to true, common case just to attach a message |
|
235 | 233 | # to a skip decorator |
|
236 | 234 | def skip(msg=None): |
|
237 | 235 | """Decorator factory - mark a test function for skipping from test suite. |
|
238 | 236 | |
|
239 | 237 | Parameters |
|
240 | 238 | ---------- |
|
241 | 239 | msg : string |
|
242 | 240 | Optional message to be added. |
|
243 | 241 | |
|
244 | 242 | Returns |
|
245 | 243 | ------- |
|
246 | 244 | decorator : function |
|
247 | 245 | Decorator, which, when applied to a function, causes SkipTest |
|
248 | 246 | to be raised, with the optional message added. |
|
249 | 247 | """ |
|
250 | 248 | |
|
251 | 249 | return skipif(True,msg) |
|
252 | 250 | |
|
253 | 251 | |
|
254 | 252 | def onlyif(condition, msg): |
|
255 | 253 | """The reverse from skipif, see skipif for details.""" |
|
256 | 254 | |
|
257 | 255 | if callable(condition): |
|
258 | 256 | skip_condition = lambda : not condition() |
|
259 | 257 | else: |
|
260 | 258 | skip_condition = lambda : not condition |
|
261 | 259 | |
|
262 | 260 | return skipif(skip_condition, msg) |
|
263 | 261 | |
|
264 | 262 | #----------------------------------------------------------------------------- |
|
265 | 263 | # Utility functions for decorators |
|
266 | 264 | def module_not_available(module): |
|
267 | 265 | """Can module be imported? Returns true if module does NOT import. |
|
268 | 266 | |
|
269 | 267 | This is used to make a decorator to skip tests that require module to be |
|
270 | 268 | available, but delay the 'import numpy' to test execution time. |
|
271 | 269 | """ |
|
272 | 270 | try: |
|
273 | 271 | mod = import_module(module) |
|
274 | 272 | mod_not_avail = False |
|
275 | 273 | except ImportError: |
|
276 | 274 | mod_not_avail = True |
|
277 | 275 | |
|
278 | 276 | return mod_not_avail |
|
279 | 277 | |
|
280 | 278 | |
|
281 | 279 | def decorated_dummy(dec, name): |
|
282 | 280 | """Return a dummy function decorated with dec, with the given name. |
|
283 | 281 | |
|
284 | 282 | Examples |
|
285 | 283 | -------- |
|
286 | 284 | import IPython.testing.decorators as dec |
|
287 | 285 | setup = dec.decorated_dummy(dec.skip_if_no_x11, __name__) |
|
288 | 286 | """ |
|
289 | 287 | warnings.warn("The function `decorated_dummy` is deprecated since IPython 4.0", |
|
290 | 288 | DeprecationWarning, stacklevel=2) |
|
291 | 289 | dummy = lambda: None |
|
292 | 290 | dummy.__name__ = name |
|
293 | 291 | return dec(dummy) |
|
294 | 292 | |
|
295 | 293 | #----------------------------------------------------------------------------- |
|
296 | 294 | # Decorators for public use |
|
297 | 295 | |
|
298 | 296 | # Decorators to skip certain tests on specific platforms. |
|
299 | 297 | skip_win32 = skipif(sys.platform == 'win32', |
|
300 | 298 | "This test does not run under Windows") |
|
301 | 299 | skip_linux = skipif(sys.platform.startswith('linux'), |
|
302 | 300 | "This test does not run under Linux") |
|
303 | 301 | skip_osx = skipif(sys.platform == 'darwin',"This test does not run under OS X") |
|
304 | 302 | |
|
305 | 303 | |
|
306 | 304 | # Decorators to skip tests if not on specific platforms. |
|
307 | 305 | skip_if_not_win32 = skipif(sys.platform != 'win32', |
|
308 | 306 | "This test only runs under Windows") |
|
309 | 307 | skip_if_not_linux = skipif(not sys.platform.startswith('linux'), |
|
310 | 308 | "This test only runs under Linux") |
|
311 | 309 | skip_if_not_osx = skipif(sys.platform != 'darwin', |
|
312 | 310 | "This test only runs under OSX") |
|
313 | 311 | |
|
314 | 312 | |
|
315 | 313 | _x11_skip_cond = (sys.platform not in ('darwin', 'win32') and |
|
316 | 314 | os.environ.get('DISPLAY', '') == '') |
|
317 | 315 | _x11_skip_msg = "Skipped under *nix when X11/XOrg not available" |
|
318 | 316 | |
|
319 | 317 | skip_if_no_x11 = skipif(_x11_skip_cond, _x11_skip_msg) |
|
320 | 318 | |
|
321 | 319 | # not a decorator itself, returns a dummy function to be used as setup |
|
322 | 320 | def skip_file_no_x11(name): |
|
323 | 321 | warnings.warn("The function `skip_file_no_x11` is deprecated since IPython 4.0", |
|
324 | 322 | DeprecationWarning, stacklevel=2) |
|
325 | 323 | return decorated_dummy(skip_if_no_x11, name) if _x11_skip_cond else None |
|
326 | 324 | |
|
327 | 325 | # Other skip decorators |
|
328 | 326 | |
|
329 | 327 | # generic skip without module |
|
330 | 328 | skip_without = lambda mod: skipif(module_not_available(mod), "This test requires %s" % mod) |
|
331 | 329 | |
|
332 | 330 | skipif_not_numpy = skip_without('numpy') |
|
333 | 331 | |
|
334 | 332 | skipif_not_matplotlib = skip_without('matplotlib') |
|
335 | 333 | |
|
336 | 334 | skipif_not_sympy = skip_without('sympy') |
|
337 | 335 | |
|
338 | 336 | skip_known_failure = knownfailureif(True,'This test is known to fail') |
|
339 | 337 | |
|
340 | 338 | # A null 'decorator', useful to make more readable code that needs to pick |
|
341 | 339 | # between different decorators based on OS or other conditions |
|
342 | 340 | null_deco = lambda f: f |
|
343 | 341 | |
|
344 | 342 | # Some tests only run where we can use unicode paths. Note that we can't just |
|
345 | 343 | # check os.path.supports_unicode_filenames, which is always False on Linux. |
|
346 | 344 | try: |
|
347 | 345 | f = tempfile.NamedTemporaryFile(prefix=u"tmp€") |
|
348 | 346 | except UnicodeEncodeError: |
|
349 | 347 | unicode_paths = False |
|
350 | 348 | else: |
|
351 | 349 | unicode_paths = True |
|
352 | 350 | f.close() |
|
353 | 351 | |
|
354 | 352 | onlyif_unicode_paths = onlyif(unicode_paths, ("This test is only applicable " |
|
355 | 353 | "where we can use unicode in filenames.")) |
|
356 | 354 | |
|
357 | 355 | |
|
358 | 356 | def onlyif_cmds_exist(*commands): |
|
359 | 357 | """ |
|
360 | 358 | Decorator to skip test when at least one of `commands` is not found. |
|
361 | 359 | """ |
|
362 | 360 | for cmd in commands: |
|
363 | if not which(cmd): | |
|
361 | if not shutil.which(cmd): | |
|
364 | 362 | return skip("This test runs only if command '{0}' " |
|
365 | 363 | "is installed".format(cmd)) |
|
366 | 364 | return null_deco |
|
367 | 365 | |
|
368 | 366 | def onlyif_any_cmd_exists(*commands): |
|
369 | 367 | """ |
|
370 | 368 | Decorator to skip test unless at least one of `commands` is found. |
|
371 | 369 | """ |
|
372 | 370 | warnings.warn("The function `onlyif_any_cmd_exists` is deprecated since IPython 4.0", |
|
373 | 371 | DeprecationWarning, stacklevel=2) |
|
374 | 372 | for cmd in commands: |
|
375 | if which(cmd): | |
|
373 | if shutil.which(cmd): | |
|
376 | 374 | return null_deco |
|
377 | 375 | return skip("This test runs only if one of the commands {0} " |
|
378 | 376 | "is installed".format(commands)) |
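For orientation, an illustrative sketch (not part of the diff) of the non-deprecated factory defined above: because onlyif_cmds_exist returns either a skip decorator or null_deco, the PATH lookup happens once, when the test module is imported.

from IPython.testing.decorators import onlyif_cmds_exist

@onlyif_cmds_exist('git')
def test_git_version():
    # Only reached when `git` is on PATH; otherwise nose records a skip.
    import subprocess
    assert subprocess.call(['git', '--version']) == 0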
@@ -1,136 +1,136 b'' | |||
|
1 | 1 | """Global IPython app to support test running. |
|
2 | 2 | |
|
3 | 3 | We must start our own ipython object and heavily muck with it so that all the |
|
4 | 4 | modifications IPython makes to system behavior don't send the doctest machinery |
|
5 | 5 | into a fit. This code should be considered a gross hack, but it gets the job |
|
6 | 6 | done. |
|
7 | 7 | """ |
|
8 | 8 | |
|
9 | 9 | # Copyright (c) IPython Development Team. |
|
10 | 10 | # Distributed under the terms of the Modified BSD License. |
|
11 | 11 | |
|
12 | 12 | import builtins as builtin_mod |
|
13 | 13 | import sys |
|
14 | import types | |
|
14 | 15 | import warnings
|
15 | 16 | |
|
16 | 17 | from . import tools |
|
17 | 18 | |
|
18 | 19 | from IPython.core import page |
|
19 | 20 | from IPython.utils import io |
|
20 | from IPython.utils import py3compat | |
|
21 | 21 | from IPython.terminal.interactiveshell import TerminalInteractiveShell |
|
22 | 22 | |
|
23 | 23 | |
|
24 | 24 | class StreamProxy(io.IOStream): |
|
25 | 25 | """Proxy for sys.stdout/err. This will request the stream *at call time* |
|
26 | 26 | allowing for nose's Capture plugin's redirection of sys.stdout/err. |
|
27 | 27 | |
|
28 | 28 | Parameters |
|
29 | 29 | ---------- |
|
30 | 30 | name : str |
|
31 | 31 | The name of the stream. This will be requested anew at every call |
|
32 | 32 | """ |
|
33 | 33 | |
|
34 | 34 | def __init__(self, name): |
|
35 | 35 | warnings.warn("StreamProxy is deprecated and unused as of IPython 5", DeprecationWarning, |
|
36 | 36 | stacklevel=2, |
|
37 | 37 | ) |
|
38 | 38 | self.name=name |
|
39 | 39 | |
|
40 | 40 | @property |
|
41 | 41 | def stream(self): |
|
42 | 42 | return getattr(sys, self.name) |
|
43 | 43 | |
|
44 | 44 | def flush(self): |
|
45 | 45 | self.stream.flush() |
|
46 | 46 | |
|
47 | 47 | |
|
48 | 48 | def get_ipython(): |
|
49 | 49 | # This will get replaced by the real thing once we start IPython below |
|
50 | 50 | return start_ipython() |
|
51 | 51 | |
|
52 | 52 | |
|
53 | 53 | # A couple of methods to override those in the running IPython to interact |
|
54 | 54 | # better with doctest (doctest captures on raw stdout, so we need to direct |
|
55 | 55 | # various types of output there otherwise it will miss them). |
|
56 | 56 | |
|
57 | 57 | def xsys(self, cmd): |
|
58 | 58 | """Replace the default system call with a capturing one for doctest. |
|
59 | 59 | """ |
|
60 | 60 | # We use getoutput, but we need to strip it because pexpect captures |
|
61 | 61 | # the trailing newline differently from commands.getoutput |
|
62 | 62 | print(self.getoutput(cmd, split=False, depth=1).rstrip(), end='', file=sys.stdout) |
|
63 | 63 | sys.stdout.flush() |
|
64 | 64 | |
|
65 | 65 | |
|
66 | 66 | def _showtraceback(self, etype, evalue, stb): |
|
67 | 67 | """Print the traceback purely on stdout for doctest to capture it. |
|
68 | 68 | """ |
|
69 | 69 | print(self.InteractiveTB.stb2text(stb), file=sys.stdout) |
|
70 | 70 | |
|
71 | 71 | |
|
72 | 72 | def start_ipython(): |
|
73 | 73 | """Start a global IPython shell, which we need for IPython-specific syntax. |
|
74 | 74 | """ |
|
75 | 75 | global get_ipython |
|
76 | 76 | |
|
77 | 77 | # This function should only ever run once! |
|
78 | 78 | if hasattr(start_ipython, 'already_called'): |
|
79 | 79 | return |
|
80 | 80 | start_ipython.already_called = True |
|
81 | 81 | |
|
82 | 82 | # Store certain global objects that IPython modifies |
|
83 | 83 | _displayhook = sys.displayhook |
|
84 | 84 | _excepthook = sys.excepthook |
|
85 | 85 | _main = sys.modules.get('__main__') |
|
86 | 86 | |
|
87 | 87 | # Create custom argv and namespaces for our IPython to be test-friendly |
|
88 | 88 | config = tools.default_config() |
|
89 | 89 | config.TerminalInteractiveShell.simple_prompt = True |
|
90 | 90 | |
|
91 | 91 | # Create and initialize our test-friendly IPython instance. |
|
92 | 92 | shell = TerminalInteractiveShell.instance(config=config, |
|
93 | 93 | ) |
|
94 | 94 | |
|
95 | 95 | # A few more tweaks needed for playing nicely with doctests... |
|
96 | 96 | |
|
97 | 97 | # remove history file |
|
98 | 98 | shell.tempfiles.append(config.HistoryManager.hist_file) |
|
99 | 99 | |
|
100 | 100 | # These traps are normally only active for interactive use, set them |
|
101 | 101 | # permanently since we'll be mocking interactive sessions. |
|
102 | 102 | shell.builtin_trap.activate() |
|
103 | 103 | |
|
104 | 104 | # Modify the IPython system call with one that uses getoutput, so that we |
|
105 | 105 | # can capture subcommands and print them to Python's stdout, otherwise the |
|
106 | 106 | # doctest machinery would miss them. |
|
107 | shell.system = py3compat.MethodType(xsys, shell) |

108 | ||

109 | shell._showtraceback = py3compat.MethodType(_showtraceback, shell) |

107 | shell.system = types.MethodType(xsys, shell) | |

108 | ||

109 | shell._showtraceback = types.MethodType(_showtraceback, shell) | |
|
110 | 110 | |
|
111 | 111 | # IPython is ready, now clean up some global state... |
|
112 | 112 | |
|
113 | 113 | # Deactivate the various python system hooks added by ipython for |
|
114 | 114 | # interactive convenience so we don't confuse the doctest system |
|
115 | 115 | sys.modules['__main__'] = _main |
|
116 | 116 | sys.displayhook = _displayhook |
|
117 | 117 | sys.excepthook = _excepthook |
|
118 | 118 | |
|
119 | 119 | # So that ipython magics and aliases can be doctested (they work by making |
|
120 | 120 | # a call into a global _ip object). Also make the top-level get_ipython |
|
121 | 121 | # now return this without recursively calling here again. |
|
122 | 122 | _ip = shell |
|
123 | 123 | get_ipython = _ip.get_ipython |
|
124 | 124 | builtin_mod._ip = _ip |
|
125 | 125 | builtin_mod.get_ipython = get_ipython |
|
126 | 126 | |
|
127 | 127 | # Override paging, so we don't require user interaction during the tests. |
|
128 | 128 | def nopage(strng, start=0, screen_lines=0, pager_cmd=None): |
|
129 | 129 | if isinstance(strng, dict): |
|
130 | 130 | strng = strng.get('text/plain', '') |
|
131 | 131 | print(strng) |
|
132 | 132 | |
|
133 | 133 | page.orig_page = page.pager_page |
|
134 | 134 | page.pager_page = nopage |
|
135 | 135 | |
|
136 | 136 | return _ip |
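A rough usage sketch of the module above (illustrative, not part of the changeset): test code is expected to call start_ipython once, after which the rebound get_ipython keeps returning the same test-friendly shell.

from IPython.testing import globalipapp

ip = globalipapp.start_ipython()          # first call builds and returns the shell
assert globalipapp.get_ipython() is ip    # later calls reuse the same instance
ip.run_cell("answer = 6 * 7")
assert ip.user_ns['answer'] == 42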
@@ -1,454 +1,454 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """IPython Test Suite Runner. |
|
3 | 3 | |
|
4 | 4 | This module provides a main entry point to a user script to test IPython |
|
5 | 5 | itself from the command line. There are two ways of running this script: |
|
6 | 6 | |
|
7 | 7 | 1. With the syntax `iptest all`. This runs our entire test suite by |
|
8 | 8 | calling this script (with different arguments) recursively. This |
|
9 | 9 | causes modules and packages to be tested in different processes, using nose
|
10 | 10 | or trial where appropriate. |
|
11 | 11 | 2. With the regular nose syntax, like `iptest -vvs IPython`. In this form |
|
12 | 12 | the script simply calls nose, but with special command line flags and |
|
13 | 13 | plugins loaded. |
|
14 | 14 | |
|
15 | 15 | """ |
|
16 | 16 | |
|
17 | 17 | # Copyright (c) IPython Development Team. |
|
18 | 18 | # Distributed under the terms of the Modified BSD License. |
|
19 | 19 | |
|
20 | 20 | |
|
21 | 21 | import glob |
|
22 | 22 | from io import BytesIO |
|
23 | 23 | import os |
|
24 | 24 | import os.path as path |
|
25 | 25 | import sys |
|
26 | 26 | from threading import Thread, Lock, Event |
|
27 | 27 | import warnings |
|
28 | 28 | |
|
29 | 29 | import nose.plugins.builtin |
|
30 | 30 | from nose.plugins.xunit import Xunit |
|
31 | 31 | from nose import SkipTest |
|
32 | 32 | from nose.core import TestProgram |
|
33 | 33 | from nose.plugins import Plugin |
|
34 | 34 | from nose.util import safe_str |
|
35 | 35 | |
|
36 | 36 | from IPython import version_info |
|
37 | from IPython.utils.py3compat import bytes_to_str |

37 | from IPython.utils.py3compat import decode | |
|
38 | 38 | from IPython.utils.importstring import import_item |
|
39 | 39 | from IPython.testing.plugin.ipdoctest import IPythonDoctest |
|
40 | 40 | from IPython.external.decorators import KnownFailure, knownfailureif |
|
41 | 41 | |
|
42 | 42 | pjoin = path.join |
|
43 | 43 | |
|
44 | 44 | |
|
45 | 45 | # Enable printing all warnings raised by IPython's modules
|
46 | 46 | warnings.filterwarnings('ignore', message='.*Matplotlib is building the font cache.*', category=UserWarning, module='.*') |
|
47 | 47 | warnings.filterwarnings('error', message='.*', category=ResourceWarning, module='.*') |
|
48 | 48 | warnings.filterwarnings('error', message=".*{'config': True}.*", category=DeprecationWarning, module='IPy.*') |
|
49 | 49 | warnings.filterwarnings('default', message='.*', category=Warning, module='IPy.*') |
|
50 | 50 | |
|
51 | 51 | warnings.filterwarnings('error', message='.*apply_wrapper.*', category=DeprecationWarning, module='.*') |
|
52 | 52 | warnings.filterwarnings('error', message='.*make_label_dec', category=DeprecationWarning, module='.*') |
|
53 | 53 | warnings.filterwarnings('error', message='.*decorated_dummy.*', category=DeprecationWarning, module='.*') |
|
54 | 54 | warnings.filterwarnings('error', message='.*skip_file_no_x11.*', category=DeprecationWarning, module='.*') |
|
55 | 55 | warnings.filterwarnings('error', message='.*onlyif_any_cmd_exists.*', category=DeprecationWarning, module='.*') |
|
56 | 56 | |
|
57 | 57 | warnings.filterwarnings('error', message='.*disable_gui.*', category=DeprecationWarning, module='.*') |
|
58 | 58 | |
|
59 | 59 | warnings.filterwarnings('error', message='.*ExceptionColors global is deprecated.*', category=DeprecationWarning, module='.*') |
|
60 | 60 | |
|
61 | 61 | # Jedi older versions |
|
62 | 62 | warnings.filterwarnings( |
|
63 | 63 | 'error', message='.*elementwise != comparison failed and.*', category=FutureWarning, module='.*') |
|
64 | 64 | |
|
65 | 65 | if version_info < (6,): |
|
66 | 66 | # nose.tools renames all things from `camelCase` to `snake_case`, which raises a

67 | 67 | # warning with the runner they also import from the standard library. (as of Dec 2015)
|
68 | 68 | # Ignore, let's revisit that in a couple of years for IPython 6. |
|
69 | 69 | warnings.filterwarnings( |
|
70 | 70 | 'ignore', message='.*Please use assertEqual instead', category=Warning, module='IPython.*') |
|
71 | 71 | |
|
72 | 72 | if version_info < (7,): |
|
73 | 73 | warnings.filterwarnings('ignore', message='.*Completer.complete.*', |
|
74 | 74 | category=PendingDeprecationWarning, module='.*') |
|
75 | 75 | else: |
|
76 | 76 | warnings.warn( |
|
77 | 77 | 'Completer.complete was pending deprecation and should be changed to Deprecated', FutureWarning) |
|
78 | 78 | |
|
79 | 79 | |
|
80 | 80 | |
|
81 | 81 | # ------------------------------------------------------------------------------ |
|
82 | 82 | # Monkeypatch Xunit to count known failures as skipped. |
|
83 | 83 | # ------------------------------------------------------------------------------ |
|
84 | 84 | def monkeypatch_xunit(): |
|
85 | 85 | try: |
|
86 | 86 | knownfailureif(True)(lambda: None)() |
|
87 | 87 | except Exception as e: |
|
88 | 88 | KnownFailureTest = type(e) |
|
89 | 89 | |
|
90 | 90 | def addError(self, test, err, capt=None): |
|
91 | 91 | if issubclass(err[0], KnownFailureTest): |
|
92 | 92 | err = (SkipTest,) + err[1:] |
|
93 | 93 | return self.orig_addError(test, err, capt) |
|
94 | 94 | |
|
95 | 95 | Xunit.orig_addError = Xunit.addError |
|
96 | 96 | Xunit.addError = addError |
|
97 | 97 | |
|
98 | 98 | #----------------------------------------------------------------------------- |
|
99 | 99 | # Check which dependencies are installed and greater than minimum version. |
|
100 | 100 | #----------------------------------------------------------------------------- |
|
101 | 101 | def extract_version(mod): |
|
102 | 102 | return mod.__version__ |
|
103 | 103 | |
|
104 | 104 | def test_for(item, min_version=None, callback=extract_version): |
|
105 | 105 | """Test to see if item is importable, and optionally check against a minimum |
|
106 | 106 | version. |
|
107 | 107 | |
|
108 | 108 | If min_version is given, the default behavior is to check against the |
|
109 | 109 | `__version__` attribute of the item, but specifying `callback` allows you to |
|
110 | 110 | extract the value you are interested in. e.g:: |
|
111 | 111 | |
|
112 | 112 | In [1]: import sys |
|
113 | 113 | |
|
114 | 114 | In [2]: from IPython.testing.iptest import test_for |
|
115 | 115 | |
|
116 | 116 | In [3]: test_for('sys', (2,6), callback=lambda sys: sys.version_info) |
|
117 | 117 | Out[3]: True |
|
118 | 118 | |
|
119 | 119 | """ |
|
120 | 120 | try: |
|
121 | 121 | check = import_item(item) |
|
122 | 122 | except (ImportError, RuntimeError): |
|
123 | 123 | # GTK reports Runtime error if it can't be initialized even if it's |
|
124 | 124 | # importable. |
|
125 | 125 | return False |
|
126 | 126 | else: |
|
127 | 127 | if min_version: |
|
128 | 128 | if callback: |
|
129 | 129 | # extra processing step to get version to compare |
|
130 | 130 | check = callback(check) |
|
131 | 131 | |
|
132 | 132 | return check >= min_version |
|
133 | 133 | else: |
|
134 | 134 | return True |
|
135 | 135 | |
|
136 | 136 | # Global dict where we can store information on what we have and what we don't |
|
137 | 137 | # have available at test run time |
|
138 | 138 | have = {'matplotlib': test_for('matplotlib'), |
|
139 | 139 | 'pygments': test_for('pygments'), |
|
140 | 140 | 'sqlite3': test_for('sqlite3')} |
|
141 | 141 | |
|
142 | 142 | #----------------------------------------------------------------------------- |
|
143 | 143 | # Test suite definitions |
|
144 | 144 | #----------------------------------------------------------------------------- |
|
145 | 145 | |
|
146 | 146 | test_group_names = ['core', |
|
147 | 147 | 'extensions', 'lib', 'terminal', 'testing', 'utils', |
|
148 | 148 | ] |
|
149 | 149 | |
|
150 | 150 | class TestSection(object): |
|
151 | 151 | def __init__(self, name, includes): |
|
152 | 152 | self.name = name |
|
153 | 153 | self.includes = includes |
|
154 | 154 | self.excludes = [] |
|
155 | 155 | self.dependencies = [] |
|
156 | 156 | self.enabled = True |
|
157 | 157 | |
|
158 | 158 | def exclude(self, module): |
|
159 | 159 | if not module.startswith('IPython'): |
|
160 | 160 | module = self.includes[0] + "." + module |
|
161 | 161 | self.excludes.append(module.replace('.', os.sep)) |
|
162 | 162 | |
|
163 | 163 | def requires(self, *packages): |
|
164 | 164 | self.dependencies.extend(packages) |
|
165 | 165 | |
|
166 | 166 | @property |
|
167 | 167 | def will_run(self): |
|
168 | 168 | return self.enabled and all(have[p] for p in self.dependencies) |
|
169 | 169 | |
|
170 | 170 | # Name -> (include, exclude, dependencies_met) |
|
171 | 171 | test_sections = {n:TestSection(n, ['IPython.%s' % n]) for n in test_group_names} |
|
172 | 172 | |
|
173 | 173 | |
|
174 | 174 | # Exclusions and dependencies |
|
175 | 175 | # --------------------------- |
|
176 | 176 | |
|
177 | 177 | # core: |
|
178 | 178 | sec = test_sections['core'] |
|
179 | 179 | if not have['sqlite3']: |
|
180 | 180 | sec.exclude('tests.test_history') |
|
181 | 181 | sec.exclude('history') |
|
182 | 182 | if not have['matplotlib']: |
|
183 | 183 | sec.exclude('pylabtools'), |
|
184 | 184 | sec.exclude('tests.test_pylabtools') |
|
185 | 185 | |
|
186 | 186 | # lib: |
|
187 | 187 | sec = test_sections['lib'] |
|
188 | 188 | sec.exclude('kernel') |
|
189 | 189 | if not have['pygments']: |
|
190 | 190 | sec.exclude('tests.test_lexers') |
|
191 | 191 | # We do this unconditionally, so that the test suite doesn't import |
|
192 | 192 | # gtk, changing the default encoding and masking some unicode bugs. |
|
193 | 193 | sec.exclude('inputhookgtk') |
|
194 | 194 | # We also do this unconditionally, because wx can interfere with Unix signals. |
|
195 | 195 | # There are currently no tests for it anyway. |
|
196 | 196 | sec.exclude('inputhookwx') |
|
197 | 197 | # Testing inputhook will need a lot of thought, to figure out |
|
198 | 198 | # how to have tests that don't lock up with the gui event |
|
199 | 199 | # loops in the picture |
|
200 | 200 | sec.exclude('inputhook') |
|
201 | 201 | |
|
202 | 202 | # testing: |
|
203 | 203 | sec = test_sections['testing'] |
|
204 | 204 | # These have to be skipped on win32 because they use echo, rm, cd, etc. |
|
205 | 205 | # See ticket https://github.com/ipython/ipython/issues/87 |
|
206 | 206 | if sys.platform == 'win32': |
|
207 | 207 | sec.exclude('plugin.test_exampleip') |
|
208 | 208 | sec.exclude('plugin.dtexample') |
|
209 | 209 | |
|
210 | 210 | # don't run jupyter_console tests found via shim |
|
211 | 211 | test_sections['terminal'].exclude('console') |
|
212 | 212 | |
|
213 | 213 | # extensions: |
|
214 | 214 | sec = test_sections['extensions'] |
|
215 | 215 | # This is deprecated in favour of rpy2 |
|
216 | 216 | sec.exclude('rmagic') |
|
217 | 217 | # autoreload does some strange stuff, so move it to its own test section |
|
218 | 218 | sec.exclude('autoreload') |
|
219 | 219 | sec.exclude('tests.test_autoreload') |
|
220 | 220 | test_sections['autoreload'] = TestSection('autoreload', |
|
221 | 221 | ['IPython.extensions.autoreload', 'IPython.extensions.tests.test_autoreload']) |
|
222 | 222 | test_group_names.append('autoreload') |
|
223 | 223 | |
|
224 | 224 | |
|
225 | 225 | #----------------------------------------------------------------------------- |
|
226 | 226 | # Functions and classes |
|
227 | 227 | #----------------------------------------------------------------------------- |
|
228 | 228 | |
|
229 | 229 | def check_exclusions_exist(): |
|
230 | 230 | from IPython.paths import get_ipython_package_dir |
|
231 | 231 | from warnings import warn |
|
232 | 232 | parent = os.path.dirname(get_ipython_package_dir()) |
|
233 | 233 | for sec in test_sections.values():

234 | 234 | for pattern in sec.excludes:
|
235 | 235 | fullpath = pjoin(parent, pattern) |
|
236 | 236 | if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'): |
|
237 | 237 | warn("Excluding nonexistent file: %r" % pattern) |
|
238 | 238 | |
|
239 | 239 | |
|
240 | 240 | class ExclusionPlugin(Plugin): |
|
241 | 241 | """A nose plugin to effect our exclusions of files and directories. |
|
242 | 242 | """ |
|
243 | 243 | name = 'exclusions' |
|
244 | 244 | score = 3000 # Should come before any other plugins |
|
245 | 245 | |
|
246 | 246 | def __init__(self, exclude_patterns=None): |
|
247 | 247 | """ |
|
248 | 248 | Parameters |
|
249 | 249 | ---------- |
|
250 | 250 | |
|
251 | 251 | exclude_patterns : sequence of strings, optional |
|
252 | 252 | Filenames containing these patterns (as raw strings, not as regular |
|
253 | 253 | expressions) are excluded from the tests. |
|
254 | 254 | """ |
|
255 | 255 | self.exclude_patterns = exclude_patterns or [] |
|
256 | 256 | super(ExclusionPlugin, self).__init__() |
|
257 | 257 | |
|
258 | 258 | def options(self, parser, env=os.environ): |
|
259 | 259 | Plugin.options(self, parser, env) |
|
260 | 260 | |
|
261 | 261 | def configure(self, options, config): |
|
262 | 262 | Plugin.configure(self, options, config) |
|
263 | 263 | # Override nose trying to disable plugin. |
|
264 | 264 | self.enabled = True |
|
265 | 265 | |
|
266 | 266 | def wantFile(self, filename): |
|
267 | 267 | """Return whether the given filename should be scanned for tests. |
|
268 | 268 | """ |
|
269 | 269 | if any(pat in filename for pat in self.exclude_patterns): |
|
270 | 270 | return False |
|
271 | 271 | return None |
|
272 | 272 | |
|
273 | 273 | def wantDirectory(self, directory): |
|
274 | 274 | """Return whether the given directory should be scanned for tests. |
|
275 | 275 | """ |
|
276 | 276 | if any(pat in directory for pat in self.exclude_patterns): |
|
277 | 277 | return False |
|
278 | 278 | return None |
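An illustrative sketch (not part of the diff) of how the exclusion patterns behave: TestSection.exclude produces os-separator paths, wantFile rejects any filename containing one of them, and None leaves the decision to nose's other plugins. The paths below assume a POSIX layout.

from IPython.testing.iptest import ExclusionPlugin

plugin = ExclusionPlugin(exclude_patterns=['IPython/lib/inputhook'])
print(plugin.wantFile('/src/IPython/lib/inputhookgtk.py'))  # False: substring match, excluded
print(plugin.wantFile('/src/IPython/lib/display.py'))       # None: no opinion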
|
279 | 279 | |
|
280 | 280 | |
|
281 | 281 | class StreamCapturer(Thread): |
|
282 | 282 | daemon = True # Don't hang if main thread crashes |
|
283 | 283 | started = False |
|
284 | 284 | def __init__(self, echo=False): |
|
285 | 285 | super(StreamCapturer, self).__init__() |
|
286 | 286 | self.echo = echo |
|
287 | 287 | self.streams = [] |
|
288 | 288 | self.buffer = BytesIO() |
|
289 | 289 | self.readfd, self.writefd = os.pipe() |
|
290 | 290 | self.buffer_lock = Lock() |
|
291 | 291 | self.stop = Event() |
|
292 | 292 | |
|
293 | 293 | def run(self): |
|
294 | 294 | self.started = True |
|
295 | 295 | |
|
296 | 296 | while not self.stop.is_set(): |
|
297 | 297 | chunk = os.read(self.readfd, 1024) |
|
298 | 298 | |
|
299 | 299 | with self.buffer_lock: |
|
300 | 300 | self.buffer.write(chunk) |
|
301 | 301 | if self.echo: |
|
302 | sys.stdout.write(bytes_to_str(chunk)) |

303 | ||

302 | sys.stdout.write(decode(chunk)) | |

303 | ||
|
304 | 304 | os.close(self.readfd) |
|
305 | 305 | os.close(self.writefd) |
|
306 | 306 | |
|
307 | 307 | def reset_buffer(self): |
|
308 | 308 | with self.buffer_lock: |
|
309 | 309 | self.buffer.truncate(0) |
|
310 | 310 | self.buffer.seek(0) |
|
311 | 311 | |
|
312 | 312 | def get_buffer(self): |
|
313 | 313 | with self.buffer_lock: |
|
314 | 314 | return self.buffer.getvalue() |
|
315 | 315 | |
|
316 | 316 | def ensure_started(self): |
|
317 | 317 | if not self.started: |
|
318 | 318 | self.start() |
|
319 | 319 | |
|
320 | 320 | def halt(self): |
|
321 | 321 | """Safely stop the thread.""" |
|
322 | 322 | if not self.started: |
|
323 | 323 | return |
|
324 | 324 | |
|
325 | 325 | self.stop.set() |
|
326 | 326 | os.write(self.writefd, b'\0') # Ensure we're not locked in a read() |
|
327 | 327 | self.join() |
|
328 | 328 | |
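A minimal sketch of the capture cycle (illustrative only, assuming nose is importable so this module loads): a subprocess writes to the capturer's pipe and the buffered bytes are fetched afterwards; the short sleep is only there to let the reader thread drain the pipe before the buffer is inspected.

import subprocess
import sys
import time

from IPython.testing.iptest import StreamCapturer

cap = StreamCapturer(echo=False)
cap.ensure_started()                      # starts the reader thread exactly once
subprocess.call([sys.executable, '-c', "print('captured')"], stdout=cap.writefd)
time.sleep(0.2)                           # give the reader thread a moment to drain the pipe
print(cap.get_buffer().decode('utf-8', 'replace'))
cap.halt()                                # wakes the blocking os.read() and joins the thread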
|
329 | 329 | class SubprocessStreamCapturePlugin(Plugin): |
|
330 | 330 | name='subprocstreams' |
|
331 | 331 | def __init__(self): |
|
332 | 332 | Plugin.__init__(self) |
|
333 | 333 | self.stream_capturer = StreamCapturer() |
|
334 | 334 | self.destination = os.environ.get('IPTEST_SUBPROC_STREAMS', 'capture') |
|
335 | 335 | # This is ugly, but distant parts of the test machinery need to be able |
|
336 | 336 | # to redirect streams, so we make the object globally accessible. |
|
337 | 337 | nose.iptest_stdstreams_fileno = self.get_write_fileno |
|
338 | 338 | |
|
339 | 339 | def get_write_fileno(self): |
|
340 | 340 | if self.destination == 'capture': |
|
341 | 341 | self.stream_capturer.ensure_started() |
|
342 | 342 | return self.stream_capturer.writefd |
|
343 | 343 | elif self.destination == 'discard': |
|
344 | 344 | return os.open(os.devnull, os.O_WRONLY) |
|
345 | 345 | else: |
|
346 | 346 | return sys.__stdout__.fileno() |
|
347 | 347 | |
|
348 | 348 | def configure(self, options, config): |
|
349 | 349 | Plugin.configure(self, options, config) |
|
350 | 350 | # Override nose trying to disable plugin. |
|
351 | 351 | if self.destination == 'capture': |
|
352 | 352 | self.enabled = True |
|
353 | 353 | |
|
354 | 354 | def startTest(self, test): |
|
355 | 355 | # Reset log capture |
|
356 | 356 | self.stream_capturer.reset_buffer() |
|
357 | 357 | |
|
358 | 358 | def formatFailure(self, test, err): |
|
359 | 359 | # Show output |
|
360 | 360 | ec, ev, tb = err |
|
361 | 361 | captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace') |
|
362 | 362 | if captured.strip(): |
|
363 | 363 | ev = safe_str(ev) |
|
364 | 364 | out = [ev, '>> begin captured subprocess output <<', |
|
365 | 365 | captured, |
|
366 | 366 | '>> end captured subprocess output <<'] |
|
367 | 367 | return ec, '\n'.join(out), tb |
|
368 | 368 | |
|
369 | 369 | return err |
|
370 | 370 | |
|
371 | 371 | formatError = formatFailure |
|
372 | 372 | |
|
373 | 373 | def finalize(self, result): |
|
374 | 374 | self.stream_capturer.halt() |
|
375 | 375 | |
|
376 | 376 | |
|
377 | 377 | def run_iptest(): |
|
378 | 378 | """Run the IPython test suite using nose. |
|
379 | 379 | |
|
380 | 380 | This function is called when this script is **not** called with the form |
|
381 | 381 | `iptest all`. It simply calls nose with appropriate command line flags |
|
382 | 382 | and accepts all of the standard nose arguments. |
|
383 | 383 | """ |
|
384 | 384 | # Apply our monkeypatch to Xunit |
|
385 | 385 | if '--with-xunit' in sys.argv and not hasattr(Xunit, 'orig_addError'): |
|
386 | 386 | monkeypatch_xunit() |
|
387 | 387 | |
|
388 | 388 | arg1 = sys.argv[1] |
|
389 | 389 | if arg1 in test_sections: |
|
390 | 390 | section = test_sections[arg1] |
|
391 | 391 | sys.argv[1:2] = section.includes |
|
392 | 392 | elif arg1.startswith('IPython.') and arg1[8:] in test_sections: |
|
393 | 393 | section = test_sections[arg1[8:]] |
|
394 | 394 | sys.argv[1:2] = section.includes |
|
395 | 395 | else: |
|
396 | 396 | section = TestSection(arg1, includes=[arg1]) |
|
397 | 397 | |
|
398 | 398 | |
|
399 | 399 | argv = sys.argv + [ '--detailed-errors', # extra info in tracebacks |
|
400 | 400 | # We add --exe because of setuptools' imbecility (it |
|
401 | 401 | # blindly does chmod +x on ALL files). Nose does the |
|
402 | 402 | # right thing and it tries to avoid executables, |
|
403 | 403 | # setuptools unfortunately forces our hand here. This |
|
404 | 404 | # has been discussed on the distutils list and the |
|
405 | 405 | # setuptools devs refuse to fix this problem! |
|
406 | 406 | '--exe', |
|
407 | 407 | ] |
|
408 | 408 | if '-a' not in argv and '-A' not in argv: |
|
409 | 409 | argv = argv + ['-a', '!crash'] |
|
410 | 410 | |
|
411 | 411 | if nose.__version__ >= '0.11': |
|
412 | 412 | # I don't fully understand why we need this one, but depending on what |
|
413 | 413 | # directory the test suite is run from, if we don't give it, 0 tests |
|
414 | 414 | # get run. Specifically, if the test suite is run from the source dir |
|
415 | 415 | # with an argument (like 'iptest.py IPython.core'), 0 tests are run,

416 | 416 | # even if the same call done in this directory works fine. It appears
|
417 | 417 | # that if the requested package is in the current dir, nose bails early |
|
418 | 418 | # by default. Since it's otherwise harmless, leave it in by default |
|
419 | 419 | # for nose >= 0.11, though unfortunately nose 0.10 doesn't support it. |
|
420 | 420 | argv.append('--traverse-namespace') |
|
421 | 421 | |
|
422 | 422 | plugins = [ ExclusionPlugin(section.excludes), KnownFailure(), |
|
423 | 423 | SubprocessStreamCapturePlugin() ] |
|
424 | 424 | |
|
425 | 425 | # we still have some vestigial doctests in core |
|
426 | 426 | if (section.name.startswith(('core', 'IPython.core', 'IPython.utils'))): |
|
427 | 427 | plugins.append(IPythonDoctest()) |
|
428 | 428 | argv.extend([ |
|
429 | 429 | '--with-ipdoctest', |
|
430 | 430 | '--ipdoctest-tests', |
|
431 | 431 | '--ipdoctest-extension=txt', |
|
432 | 432 | ]) |
|
433 | 433 | |
|
434 | 434 | |
|
435 | 435 | # Use working directory set by parent process (see iptestcontroller) |
|
436 | 436 | if 'IPTEST_WORKING_DIR' in os.environ: |
|
437 | 437 | os.chdir(os.environ['IPTEST_WORKING_DIR']) |
|
438 | 438 | |
|
439 | 439 | # We need a global ipython running in this process, but the special |
|
440 | 440 | # in-process group spawns its own IPython kernels, so for *that* group we |
|
441 | 441 | # must avoid also opening the global one (otherwise there's a conflict of |
|
442 | 442 | # singletons). Ultimately the solution to this problem is to refactor our |
|
443 | 443 | # assumptions about what needs to be a singleton and what doesn't (app |
|
444 | 444 | # objects should, individual shells shouldn't). But for now, this |
|
445 | 445 | # workaround allows the test suite for the inprocess module to complete. |
|
446 | 446 | if 'kernel.inprocess' not in section.name: |
|
447 | 447 | from IPython.testing import globalipapp |
|
448 | 448 | globalipapp.start_ipython() |
|
449 | 449 | |
|
450 | 450 | # Now nose can run |
|
451 | 451 | TestProgram(argv=argv, addplugins=plugins) |
|
452 | 452 | |
|
453 | 453 | if __name__ == '__main__': |
|
454 | 454 | run_iptest() |
@@ -1,510 +1,510 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """IPython Test Process Controller |
|
3 | 3 | |
|
4 | 4 | This module runs one or more subprocesses which will actually run the IPython |
|
5 | 5 | test suite. |
|
6 | 6 | |
|
7 | 7 | """ |
|
8 | 8 | |
|
9 | 9 | # Copyright (c) IPython Development Team. |
|
10 | 10 | # Distributed under the terms of the Modified BSD License. |
|
11 | 11 | |
|
12 | 12 | |
|
13 | 13 | import argparse |
|
14 | 14 | import multiprocessing.pool |
|
15 | 15 | import os |
|
16 | 16 | import stat |
|
17 | 17 | import shutil |
|
18 | 18 | import signal |
|
19 | 19 | import sys |
|
20 | 20 | import subprocess |
|
21 | 21 | import time |
|
22 | 22 | |
|
23 | 23 | from .iptest import ( |
|
24 | 24 | have, test_group_names as py_test_group_names, test_sections, StreamCapturer, |
|
25 | 25 | ) |
|
26 | 26 | from IPython.utils.path import compress_user |
|
27 | from IPython.utils.py3compat import bytes_to_str |

27 | from IPython.utils.py3compat import decode | |
|
28 | 28 | from IPython.utils.sysinfo import get_sys_info |
|
29 | 29 | from IPython.utils.tempdir import TemporaryDirectory |
|
30 | 30 | |
|
31 | 31 | def popen_wait(p, timeout): |
|
32 | 32 | return p.wait(timeout) |
|
33 | 33 | |
|
34 | 34 | class TestController(object): |
|
35 | 35 | """Run tests in a subprocess |
|
36 | 36 | """ |
|
37 | 37 | #: str, IPython test suite to be executed. |
|
38 | 38 | section = None |
|
39 | 39 | #: list, command line arguments to be executed |
|
40 | 40 | cmd = None |
|
41 | 41 | #: dict, extra environment variables to set for the subprocess |
|
42 | 42 | env = None |
|
43 | 43 | #: list, TemporaryDirectory instances to clear up when the process finishes |
|
44 | 44 | dirs = None |
|
45 | 45 | #: subprocess.Popen instance |
|
46 | 46 | process = None |
|
47 | 47 | #: str, process stdout+stderr |
|
48 | 48 | stdout = None |
|
49 | 49 | |
|
50 | 50 | def __init__(self): |
|
51 | 51 | self.cmd = [] |
|
52 | 52 | self.env = {} |
|
53 | 53 | self.dirs = [] |
|
54 | 54 | |
|
55 | 55 | def setup(self): |
|
56 | 56 | """Create temporary directories etc. |
|
57 | 57 | |
|
58 | 58 | This is only called when we know the test group will be run. Things |
|
59 | 59 | created here may be cleaned up by self.cleanup(). |
|
60 | 60 | """ |
|
61 | 61 | pass |
|
62 | 62 | |
|
63 | 63 | def launch(self, buffer_output=False, capture_output=False): |
|
64 | 64 | # print('*** ENV:', self.env) # dbg |
|
65 | 65 | # print('*** CMD:', self.cmd) # dbg |
|
66 | 66 | env = os.environ.copy() |
|
67 | 67 | env.update(self.env) |
|
68 | 68 | if buffer_output: |
|
69 | 69 | capture_output = True |
|
70 | 70 | self.stdout_capturer = c = StreamCapturer(echo=not buffer_output) |
|
71 | 71 | c.start() |
|
72 | 72 | stdout = c.writefd if capture_output else None |
|
73 | 73 | stderr = subprocess.STDOUT if capture_output else None |
|
74 | 74 | self.process = subprocess.Popen(self.cmd, stdout=stdout, |
|
75 | 75 | stderr=stderr, env=env) |
|
76 | 76 | |
|
77 | 77 | def wait(self): |
|
78 | 78 | self.process.wait() |
|
79 | 79 | self.stdout_capturer.halt() |
|
80 | 80 | self.stdout = self.stdout_capturer.get_buffer() |
|
81 | 81 | return self.process.returncode |
|
82 | 82 | |
|
83 | 83 | def print_extra_info(self): |
|
84 | 84 | """Print extra information about this test run. |
|
85 | 85 | |
|
86 | 86 | If we're running in parallel and showing the concise view, this is only |
|
87 | 87 | called if the test group fails. Otherwise, it's called before the test |
|
88 | 88 | group is started. |
|
89 | 89 | |
|
90 | 90 | The base implementation does nothing, but it can be overridden by |
|
91 | 91 | subclasses. |
|
92 | 92 | """ |
|
93 | 93 | return |
|
94 | 94 | |
|
95 | 95 | def cleanup_process(self): |
|
96 | 96 | """Cleanup on exit by killing any leftover processes.""" |
|
97 | 97 | subp = self.process |
|
98 | 98 | if subp is None or (subp.poll() is not None): |
|
99 | 99 | return # Process doesn't exist, or is already dead. |
|
100 | 100 | |
|
101 | 101 | try: |
|
102 | 102 | print('Cleaning up stale PID: %d' % subp.pid) |
|
103 | 103 | subp.kill() |
|
104 | 104 | except: # (OSError, WindowsError) ? |
|
105 | 105 | # This is just a best effort, if we fail or the process was |
|
106 | 106 | # really gone, ignore it. |
|
107 | 107 | pass |
|
108 | 108 | else: |
|
109 | 109 | for i in range(10): |
|
110 | 110 | if subp.poll() is None: |
|
111 | 111 | time.sleep(0.1) |
|
112 | 112 | else: |
|
113 | 113 | break |
|
114 | 114 | |
|
115 | 115 | if subp.poll() is None: |
|
116 | 116 | # The process did not die... |
|
117 | 117 | print('... failed. Manual cleanup may be required.') |
|
118 | 118 | |
|
119 | 119 | def cleanup(self): |
|
120 | 120 | "Kill process if it's still alive, and clean up temporary directories" |
|
121 | 121 | self.cleanup_process() |
|
122 | 122 | for td in self.dirs: |
|
123 | 123 | td.cleanup() |
|
124 | 124 | |
|
125 | 125 | __del__ = cleanup |
|
126 | 126 | |
|
127 | 127 | |
|
128 | 128 | class PyTestController(TestController): |
|
129 | 129 | """Run Python tests using IPython.testing.iptest""" |
|
130 | 130 | #: str, Python command to execute in subprocess |
|
131 | 131 | pycmd = None |
|
132 | 132 | |
|
133 | 133 | def __init__(self, section, options): |
|
134 | 134 | """Create new test runner.""" |
|
135 | 135 | TestController.__init__(self) |
|
136 | 136 | self.section = section |
|
137 | 137 | # pycmd is put into cmd[2] in PyTestController.launch() |
|
138 | 138 | self.cmd = [sys.executable, '-c', None, section] |
|
139 | 139 | self.pycmd = "from IPython.testing.iptest import run_iptest; run_iptest()" |
|
140 | 140 | self.options = options |
|
141 | 141 | |
|
142 | 142 | def setup(self): |
|
143 | 143 | ipydir = TemporaryDirectory() |
|
144 | 144 | self.dirs.append(ipydir) |
|
145 | 145 | self.env['IPYTHONDIR'] = ipydir.name |
|
146 | 146 | self.workingdir = workingdir = TemporaryDirectory() |
|
147 | 147 | self.dirs.append(workingdir) |
|
148 | 148 | self.env['IPTEST_WORKING_DIR'] = workingdir.name |
|
149 | 149 | # This means we won't get odd effects from our own matplotlib config |
|
150 | 150 | self.env['MPLCONFIGDIR'] = workingdir.name |
|
151 | 151 | # For security reasons (http://bugs.python.org/issue16202), use |
|
152 | 152 | # a temporary directory to which other users have no access. |
|
153 | 153 | self.env['TMPDIR'] = workingdir.name |
|
154 | 154 | |
|
155 | 155 | # Add a non-accessible directory to PATH (see gh-7053) |
|
156 | 156 | noaccess = os.path.join(self.workingdir.name, "_no_access_") |
|
157 | 157 | self.noaccess = noaccess |
|
158 | 158 | os.mkdir(noaccess, 0) |
|
159 | 159 | |
|
160 | 160 | PATH = os.environ.get('PATH', '') |
|
161 | 161 | if PATH: |
|
162 | 162 | PATH = noaccess + os.pathsep + PATH |
|
163 | 163 | else: |
|
164 | 164 | PATH = noaccess |
|
165 | 165 | self.env['PATH'] = PATH |
|
166 | 166 | |
|
167 | 167 | # From options: |
|
168 | 168 | if self.options.xunit: |
|
169 | 169 | self.add_xunit() |
|
170 | 170 | if self.options.coverage: |
|
171 | 171 | self.add_coverage() |
|
172 | 172 | self.env['IPTEST_SUBPROC_STREAMS'] = self.options.subproc_streams |
|
173 | 173 | self.cmd.extend(self.options.extra_args) |
|
174 | 174 | |
|
175 | 175 | def cleanup(self): |
|
176 | 176 | """ |
|
177 | 177 | Make the non-accessible directory created in setup() accessible |
|
178 | 178 | again, otherwise deleting the workingdir will fail. |
|
179 | 179 | """ |
|
180 | 180 | os.chmod(self.noaccess, stat.S_IRWXU) |
|
181 | 181 | TestController.cleanup(self) |
|
182 | 182 | |
|
183 | 183 | @property |
|
184 | 184 | def will_run(self): |
|
185 | 185 | try: |
|
186 | 186 | return test_sections[self.section].will_run |
|
187 | 187 | except KeyError: |
|
188 | 188 | return True |
|
189 | 189 | |
|
190 | 190 | def add_xunit(self): |
|
191 | 191 | xunit_file = os.path.abspath(self.section + '.xunit.xml') |
|
192 | 192 | self.cmd.extend(['--with-xunit', '--xunit-file', xunit_file]) |
|
193 | 193 | |
|
194 | 194 | def add_coverage(self): |
|
195 | 195 | try: |
|
196 | 196 | sources = test_sections[self.section].includes |
|
197 | 197 | except KeyError: |
|
198 | 198 | sources = ['IPython'] |
|
199 | 199 | |
|
200 | 200 | coverage_rc = ("[run]\n" |
|
201 | 201 | "data_file = {data_file}\n" |
|
202 | 202 | "source =\n" |
|
203 | 203 | " {source}\n" |
|
204 | 204 | ).format(data_file=os.path.abspath('.coverage.'+self.section), |
|
205 | 205 | source="\n ".join(sources)) |
|
206 | 206 | config_file = os.path.join(self.workingdir.name, '.coveragerc') |
|
207 | 207 | with open(config_file, 'w') as f: |
|
208 | 208 | f.write(coverage_rc) |
|
209 | 209 | |
|
210 | 210 | self.env['COVERAGE_PROCESS_START'] = config_file |
|
211 | 211 | self.pycmd = "import coverage; coverage.process_startup(); " + self.pycmd |
|
212 | 212 | |
|
213 | 213 | def launch(self, buffer_output=False): |
|
214 | 214 | self.cmd[2] = self.pycmd |
|
215 | 215 | super(PyTestController, self).launch(buffer_output=buffer_output) |
|
216 | 216 | |
|
217 | 217 | |
|
218 | 218 | def prepare_controllers(options): |
|
219 | 219 | """Returns two lists of TestController instances, those to run, and those |
|
220 | 220 | not to run.""" |
|
221 | 221 | testgroups = options.testgroups |
|
222 | 222 | if not testgroups: |
|
223 | 223 | testgroups = py_test_group_names |
|
224 | 224 | |
|
225 | 225 | controllers = [PyTestController(name, options) for name in testgroups] |
|
226 | 226 | |
|
227 | 227 | to_run = [c for c in controllers if c.will_run] |
|
228 | 228 | not_run = [c for c in controllers if not c.will_run] |
|
229 | 229 | return to_run, not_run |
|
230 | 230 | |
|
231 | 231 | def do_run(controller, buffer_output=True): |
|
232 | 232 | """Setup and run a test controller. |
|
233 | 233 | |
|
234 | 234 | If buffer_output is True, no output is displayed, to avoid it appearing |
|
235 | 235 | interleaved. In this case, the caller is responsible for displaying test |
|
236 | 236 | output on failure. |
|
237 | 237 | |
|
238 | 238 | Returns |
|
239 | 239 | ------- |
|
240 | 240 | controller : TestController |
|
241 | 241 | The same controller as passed in, as a convenience for using map() type |
|
242 | 242 | APIs. |
|
243 | 243 | exitcode : int |
|
244 | 244 | The exit code of the test subprocess. Non-zero indicates failure. |
|
245 | 245 | """ |
|
246 | 246 | try: |
|
247 | 247 | try: |
|
248 | 248 | controller.setup() |
|
249 | 249 | if not buffer_output: |
|
250 | 250 | controller.print_extra_info() |
|
251 | 251 | controller.launch(buffer_output=buffer_output) |
|
252 | 252 | except Exception: |
|
253 | 253 | import traceback |
|
254 | 254 | traceback.print_exc() |
|
255 | 255 | return controller, 1 # signal failure |
|
256 | 256 | |
|
257 | 257 | exitcode = controller.wait() |
|
258 | 258 | return controller, exitcode |
|
259 | 259 | |
|
260 | 260 | except KeyboardInterrupt: |
|
261 | 261 | return controller, -signal.SIGINT |
|
262 | 262 | finally: |
|
263 | 263 | controller.cleanup() |
|
264 | 264 | |
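A rough sketch (not part of the changeset) of how a single group is driven through the pieces above, using default_options defined later in this module:

from IPython.testing.iptestcontroller import PyTestController, default_options, do_run

opts = default_options()
controller = PyTestController('core', opts)        # wraps `python -c "...run_iptest()" core`
controller, exitcode = do_run(controller, buffer_output=True)
print(controller.section, 'FAILED' if exitcode else 'OK')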
|
265 | 265 | def report(): |
|
266 | 266 | """Return a string with a summary report of test-related variables.""" |
|
267 | 267 | inf = get_sys_info() |
|
268 | 268 | out = [] |
|
269 | 269 | def _add(name, value): |
|
270 | 270 | out.append((name, value)) |
|
271 | 271 | |
|
272 | 272 | _add('IPython version', inf['ipython_version']) |
|
273 | 273 | _add('IPython commit', "{} ({})".format(inf['commit_hash'], inf['commit_source'])) |
|
274 | 274 | _add('IPython package', compress_user(inf['ipython_path'])) |
|
275 | 275 | _add('Python version', inf['sys_version'].replace('\n','')) |
|
276 | 276 | _add('sys.executable', compress_user(inf['sys_executable'])) |
|
277 | 277 | _add('Platform', inf['platform']) |
|
278 | 278 | |
|
279 | 279 | width = max(len(n) for (n,v) in out) |
|
280 | 280 | out = ["{:<{width}}: {}\n".format(n, v, width=width) for (n,v) in out] |
|
281 | 281 | |
|
282 | 282 | avail = [] |
|
283 | 283 | not_avail = [] |
|
284 | 284 | |
|
285 | 285 | for k, is_avail in have.items(): |
|
286 | 286 | if is_avail: |
|
287 | 287 | avail.append(k) |
|
288 | 288 | else: |
|
289 | 289 | not_avail.append(k) |
|
290 | 290 | |
|
291 | 291 | if avail: |
|
292 | 292 | out.append('\nTools and libraries available at test time:\n') |
|
293 | 293 | avail.sort() |
|
294 | 294 | out.append(' ' + ' '.join(avail)+'\n') |
|
295 | 295 | |
|
296 | 296 | if not_avail: |
|
297 | 297 | out.append('\nTools and libraries NOT available at test time:\n') |
|
298 | 298 | not_avail.sort() |
|
299 | 299 | out.append(' ' + ' '.join(not_avail)+'\n') |
|
300 | 300 | |
|
301 | 301 | return ''.join(out) |
|
302 | 302 | |
|
303 | 303 | def run_iptestall(options): |
|
304 | 304 | """Run the entire IPython test suite by calling nose and trial. |
|
305 | 305 | |
|
306 | 306 | This function constructs :class:`TestController` instances for all IPython

307 | 307 | modules and packages and then runs each of them. This causes the modules
|
308 | 308 | and packages of IPython to be tested each in their own subprocess using |
|
309 | 309 | nose. |
|
310 | 310 | |
|
311 | 311 | Parameters |
|
312 | 312 | ---------- |
|
313 | 313 | |
|
314 | 314 | All parameters are passed as attributes of the options object. |
|
315 | 315 | |
|
316 | 316 | testgroups : list of str |
|
317 | 317 | Run only these sections of the test suite. If empty, run all the available |
|
318 | 318 | sections. |
|
319 | 319 | |
|
320 | 320 | fast : int or None |
|
321 | 321 | Run the test suite in parallel, using n simultaneous processes. If None |
|
322 | 322 | is passed, one process is used per CPU core. Default 1 (i.e. sequential) |
|
323 | 323 | |
|
324 | 324 | inc_slow : bool |
|
325 | 325 | Include slow tests. By default, these tests aren't run. |
|
326 | 326 | |
|
327 | 327 | url : unicode |
|
328 | 328 | Address:port to use when running the JS tests. |
|
329 | 329 | |
|
330 | 330 | xunit : bool |
|
331 | 331 | Produce Xunit XML output. This is written to multiple foo.xunit.xml files. |
|
332 | 332 | |
|
333 | 333 | coverage : bool or str |
|
334 | 334 | Measure code coverage from tests. True will store the raw coverage data, |
|
335 | 335 | or pass 'html' or 'xml' to get reports. |
|
336 | 336 | |
|
337 | 337 | extra_args : list |
|
338 | 338 | Extra arguments to pass to the test subprocesses, e.g. '-v' |
|
339 | 339 | """ |
|
340 | 340 | to_run, not_run = prepare_controllers(options) |
|
341 | 341 | |
|
342 | 342 | def justify(ltext, rtext, width=70, fill='-'): |
|
343 | 343 | ltext += ' ' |
|
344 | 344 | rtext = (' ' + rtext).rjust(width - len(ltext), fill) |
|
345 | 345 | return ltext + rtext |
|
346 | 346 | |
|
347 | 347 | # Run all test runners, tracking execution time |
|
348 | 348 | failed = [] |
|
349 | 349 | t_start = time.time() |
|
350 | 350 | |
|
351 | 351 | print() |
|
352 | 352 | if options.fast == 1: |
|
353 | 353 | # This actually means sequential, i.e. with 1 job |
|
354 | 354 | for controller in to_run: |
|
355 | 355 | print('Test group:', controller.section) |
|
356 | 356 | sys.stdout.flush() # Show in correct order when output is piped |
|
357 | 357 | controller, res = do_run(controller, buffer_output=False) |
|
358 | 358 | if res: |
|
359 | 359 | failed.append(controller) |
|
360 | 360 | if res == -signal.SIGINT: |
|
361 | 361 | print("Interrupted") |
|
362 | 362 | break |
|
363 | 363 | print() |
|
364 | 364 | |
|
365 | 365 | else: |
|
366 | 366 | # Run tests concurrently |
|
367 | 367 | try: |
|
368 | 368 | pool = multiprocessing.pool.ThreadPool(options.fast) |
|
369 | 369 | for (controller, res) in pool.imap_unordered(do_run, to_run): |
|
370 | 370 | res_string = 'OK' if res == 0 else 'FAILED' |
|
371 | 371 | print(justify('Test group: ' + controller.section, res_string)) |
|
372 | 372 | if res: |
|
373 | 373 | controller.print_extra_info() |
|
374 | print(bytes_to_str(controller.stdout)) |

374 | print(decode(controller.stdout)) | |
|
375 | 375 | failed.append(controller) |
|
376 | 376 | if res == -signal.SIGINT: |
|
377 | 377 | print("Interrupted") |
|
378 | 378 | break |
|
379 | 379 | except KeyboardInterrupt: |
|
380 | 380 | return |
|
381 | 381 | |
|
382 | 382 | for controller in not_run: |
|
383 | 383 | print(justify('Test group: ' + controller.section, 'NOT RUN')) |
|
384 | 384 | |
|
385 | 385 | t_end = time.time() |
|
386 | 386 | t_tests = t_end - t_start |
|
387 | 387 | nrunners = len(to_run) |
|
388 | 388 | nfail = len(failed) |
|
389 | 389 | # summarize results |
|
390 | 390 | print('_'*70) |
|
391 | 391 | print('Test suite completed for system with the following information:') |
|
392 | 392 | print(report()) |
|
393 | 393 | took = "Took %.3fs." % t_tests |
|
394 | 394 | print('Status: ', end='') |
|
395 | 395 | if not failed: |
|
396 | 396 | print('OK (%d test groups).' % nrunners, took) |
|
397 | 397 | else: |
|
398 | 398 | # If anything went wrong, point out what command to rerun manually to |
|
399 | 399 | # see the actual errors and individual summary |
|
400 | 400 | failed_sections = [c.section for c in failed] |
|
401 | 401 | print('ERROR - {} out of {} test groups failed ({}).'.format(nfail, |
|
402 | 402 | nrunners, ', '.join(failed_sections)), took) |
|
403 | 403 | print() |
|
404 | 404 | print('You may wish to rerun these, with:') |
|
405 | 405 | print(' iptest', *failed_sections) |
|
406 | 406 | print() |
|
407 | 407 | |
|
408 | 408 | if options.coverage: |
|
409 | 409 | from coverage import coverage, CoverageException |
|
410 | 410 | cov = coverage(data_file='.coverage') |
|
411 | 411 | cov.combine() |
|
412 | 412 | cov.save() |
|
413 | 413 | |
|
414 | 414 | # Coverage HTML report |
|
415 | 415 | if options.coverage == 'html': |
|
416 | 416 | html_dir = 'ipy_htmlcov' |
|
417 | 417 | shutil.rmtree(html_dir, ignore_errors=True) |
|
418 | 418 | print("Writing HTML coverage report to %s/ ... " % html_dir, end="") |
|
419 | 419 | sys.stdout.flush() |
|
420 | 420 | |
|
421 | 421 | # Custom HTML reporter to clean up module names. |
|
422 | 422 | from coverage.html import HtmlReporter |
|
423 | 423 | class CustomHtmlReporter(HtmlReporter): |
|
424 | 424 | def find_code_units(self, morfs): |
|
425 | 425 | super(CustomHtmlReporter, self).find_code_units(morfs) |
|
426 | 426 | for cu in self.code_units: |
|
427 | 427 | nameparts = cu.name.split(os.sep) |
|
428 | 428 | if 'IPython' not in nameparts: |
|
429 | 429 | continue |
|
430 | 430 | ix = nameparts.index('IPython') |
|
431 | 431 | cu.name = '.'.join(nameparts[ix:]) |
|
432 | 432 | |
|
433 | 433 | # Reimplement the html_report method with our custom reporter |
|
434 | 434 | cov.get_data() |
|
435 | 435 | cov.config.from_args(omit='*{0}tests{0}*'.format(os.sep), html_dir=html_dir, |
|
436 | 436 | html_title='IPython test coverage', |
|
437 | 437 | ) |
|
438 | 438 | reporter = CustomHtmlReporter(cov, cov.config) |
|
439 | 439 | reporter.report(None) |
|
440 | 440 | print('done.') |
|
441 | 441 | |
|
442 | 442 | # Coverage XML report |
|
443 | 443 | elif options.coverage == 'xml': |
|
444 | 444 | try: |
|
445 | 445 | cov.xml_report(outfile='ipy_coverage.xml') |
|
446 | 446 | except CoverageException as e: |
|
447 | 447 | print('Generating coverage report failed. Are you running javascript tests only?') |
|
448 | 448 | import traceback |
|
449 | 449 | traceback.print_exc() |
|
450 | 450 | |
|
451 | 451 | if failed: |
|
452 | 452 | # Ensure that our exit code indicates failure |
|
453 | 453 | sys.exit(1) |
|
454 | 454 | |
|
455 | 455 | argparser = argparse.ArgumentParser(description='Run IPython test suite') |
|
456 | 456 | argparser.add_argument('testgroups', nargs='*', |
|
457 | 457 | help='Run specified groups of tests. If omitted, run ' |
|
458 | 458 | 'all tests.') |
|
459 | 459 | argparser.add_argument('--all', action='store_true', |
|
460 | 460 | help='Include slow tests not run by default.') |
|
461 | 461 | argparser.add_argument('--url', help="URL to use for the JS tests.") |
|
462 | 462 | argparser.add_argument('-j', '--fast', nargs='?', const=None, default=1, type=int, |
|
463 | 463 | help='Run test sections in parallel. This starts as many ' |
|
464 | 464 | 'processes as you have cores, or you can specify a number.') |
|
465 | 465 | argparser.add_argument('--xunit', action='store_true', |
|
466 | 466 | help='Produce Xunit XML results') |
|
467 | 467 | argparser.add_argument('--coverage', nargs='?', const=True, default=False, |
|
468 | 468 | help="Measure test coverage. Specify 'html' or " |
|
469 | 469 | "'xml' to get reports.") |
|
470 | 470 | argparser.add_argument('--subproc-streams', default='capture', |
|
471 | 471 | help="What to do with stdout/stderr from subprocesses. " |
|
472 | 472 | "'capture' (default), 'show' and 'discard' are the options.") |
|
473 | 473 | |
|
474 | 474 | def default_options(): |
|
475 | 475 | """Get an argparse Namespace object with the default arguments, to pass to |
|
476 | 476 | :func:`run_iptestall`. |
|
477 | 477 | """ |
|
478 | 478 | options = argparser.parse_args([]) |
|
479 | 479 | options.extra_args = [] |
|
480 | 480 | return options |
|
481 | 481 | |
|
482 | 482 | def main(): |
|
483 | 483 | # iptest doesn't work correctly if the working directory is the |
|
484 | 484 | # root of the IPython source tree. Tell the user to avoid |
|
485 | 485 | # frustration. |
|
486 | 486 | if os.path.exists(os.path.join(os.getcwd(), |
|
487 | 487 | 'IPython', 'testing', '__main__.py')): |
|
488 | 488 | print("Don't run iptest from the IPython source directory", |
|
489 | 489 | file=sys.stderr) |
|
490 | 490 | sys.exit(1) |
|
491 | 491 | # Arguments after -- should be passed through to nose. Argparse treats |
|
492 | 492 | # everything after -- as regular positional arguments, so we separate them |
|
493 | 493 | # first. |
|
494 | 494 | try: |
|
495 | 495 | ix = sys.argv.index('--') |
|
496 | 496 | except ValueError: |
|
497 | 497 | to_parse = sys.argv[1:] |
|
498 | 498 | extra_args = [] |
|
499 | 499 | else: |
|
500 | 500 | to_parse = sys.argv[1:ix] |
|
501 | 501 | extra_args = sys.argv[ix+1:] |
|
502 | 502 | |
|
503 | 503 | options = argparser.parse_args(to_parse) |
|
504 | 504 | options.extra_args = extra_args |
|
505 | 505 | |
|
506 | 506 | run_iptestall(options) |
|
507 | 507 | |
|
508 | 508 | |
|
509 | 509 | if __name__ == '__main__': |
|
510 | 510 | main() |
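For orientation (an illustrative sketch, not part of the changeset), the suite can also be driven programmatically, mirroring what main() does after parsing sys.argv:

from IPython.testing.iptestcontroller import default_options, run_iptestall

options = default_options()
options.testgroups = ['core', 'utils']   # limit the run to these sections
options.fast = 4                         # four parallel worker processes
options.coverage = 'html'                # write the ipy_htmlcov/ HTML report
run_iptestall(options)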
@@ -1,468 +1,467 b'' | |||
|
1 | 1 | """Generic testing tools. |
|
2 | 2 | |
|
3 | 3 | Authors |
|
4 | 4 | ------- |
|
5 | 5 | - Fernando Perez <Fernando.Perez@berkeley.edu> |
|
6 | 6 | """ |
|
7 | 7 | |
|
8 | 8 | |
|
9 | 9 | # Copyright (c) IPython Development Team. |
|
10 | 10 | # Distributed under the terms of the Modified BSD License. |
|
11 | 11 | |
|
12 | 12 | import os |
|
13 | 13 | import re |
|
14 | 14 | import sys |
|
15 | 15 | import tempfile |
|
16 | 16 | |
|
17 | 17 | from contextlib import contextmanager |
|
18 | 18 | from io import StringIO |
|
19 | 19 | from subprocess import Popen, PIPE |
|
20 | 20 | from unittest.mock import patch |
|
21 | 21 | |
|
22 | 22 | try: |
|
23 | 23 | # These tools are used by parts of the runtime, so we make the nose |
|
24 | 24 | # dependency optional at this point. Nose is a hard dependency to run the |
|
25 | 25 | # test suite, but NOT to use ipython itself. |
|
26 | 26 | import nose.tools as nt |
|
27 | 27 | has_nose = True |
|
28 | 28 | except ImportError: |
|
29 | 29 | has_nose = False |
|
30 | 30 | |
|
31 | 31 | from traitlets.config.loader import Config |
|
32 | 32 | from IPython.utils.process import get_output_error_code |
|
33 | 33 | from IPython.utils.text import list_strings |
|
34 | 34 | from IPython.utils.io import temp_pyfile, Tee |
|
35 | 35 | from IPython.utils import py3compat |
|
36 | from IPython.utils.encoding import DEFAULT_ENCODING | |
|
37 | 36 | |
|
38 | 37 | from . import decorators as dec |
|
39 | 38 | from . import skipdoctest |
|
40 | 39 | |
|
41 | 40 | |
|
42 | 41 | # The docstring for full_path doctests differently on win32 (different path |
|
43 | 42 | # separator) so just skip the doctest there. The example remains informative. |
|
44 | 43 | doctest_deco = skipdoctest.skip_doctest if sys.platform == 'win32' else dec.null_deco |
|
45 | 44 | |
|
46 | 45 | @doctest_deco |
|
47 | 46 | def full_path(startPath,files): |
|
48 | 47 | """Make full paths for all the listed files, based on startPath. |
|
49 | 48 | |
|
50 | 49 | Only the base part of startPath is kept, since this routine is typically |
|
51 | 50 | used with a script's ``__file__`` variable as startPath. The base of startPath |
|
52 | 51 | is then prepended to all the listed files, forming the output list. |
|
53 | 52 | |
|
54 | 53 | Parameters |
|
55 | 54 | ---------- |
|
56 | 55 | startPath : string |
|
57 | 56 | Initial path to use as the base for the results. This path is split |
|
58 | 57 | using os.path.split() and only its first component is kept. |
|
59 | 58 | |
|
60 | 59 | files : string or list |
|
61 | 60 | One or more files. |
|
62 | 61 | |
|
63 | 62 | Examples |
|
64 | 63 | -------- |
|
65 | 64 | |
|
66 | 65 | >>> full_path('/foo/bar.py',['a.txt','b.txt']) |
|
67 | 66 | ['/foo/a.txt', '/foo/b.txt'] |
|
68 | 67 | |
|
69 | 68 | >>> full_path('/foo',['a.txt','b.txt']) |
|
70 | 69 | ['/a.txt', '/b.txt'] |
|
71 | 70 | |
|
72 | 71 | If a single file is given, the output is still a list:: |
|
73 | 72 | |
|
74 | 73 | >>> full_path('/foo','a.txt') |
|
75 | 74 | ['/a.txt'] |
|
76 | 75 | """ |
|
77 | 76 | |
|
78 | 77 | files = list_strings(files) |
|
79 | 78 | base = os.path.split(startPath)[0] |
|
80 | 79 | return [ os.path.join(base,f) for f in files ] |
|
81 | 80 | |
|
82 | 81 | |
|
83 | 82 | def parse_test_output(txt): |
|
84 | 83 | """Parse the output of a test run and return errors, failures. |
|
85 | 84 | |
|
86 | 85 | Parameters |
|
87 | 86 | ---------- |
|
88 | 87 | txt : str |
|
89 | 88 | Text output of a test run, assumed to contain a line of one of the |
|
90 | 89 | following forms:: |
|
91 | 90 | |
|
92 | 91 | 'FAILED (errors=1)' |
|
93 | 92 | 'FAILED (failures=1)' |
|
94 | 93 | 'FAILED (errors=1, failures=1)' |
|
95 | 94 | |
|
96 | 95 | Returns |
|
97 | 96 | ------- |
|
98 | 97 | nerr, nfail |
|
99 | 98 | number of errors and failures. |
|
100 | 99 | """ |
|
101 | 100 | |
|
102 | 101 | err_m = re.search(r'^FAILED \(errors=(\d+)\)', txt, re.MULTILINE) |
|
103 | 102 | if err_m: |
|
104 | 103 | nerr = int(err_m.group(1)) |
|
105 | 104 | nfail = 0 |
|
106 | 105 | return nerr, nfail |
|
107 | 106 | |
|
108 | 107 | fail_m = re.search(r'^FAILED \(failures=(\d+)\)', txt, re.MULTILINE) |
|
109 | 108 | if fail_m: |
|
110 | 109 | nerr = 0 |
|
111 | 110 | nfail = int(fail_m.group(1)) |
|
112 | 111 | return nerr, nfail |
|
113 | 112 | |
|
114 | 113 | both_m = re.search(r'^FAILED \(errors=(\d+), failures=(\d+)\)', txt, |
|
115 | 114 | re.MULTILINE) |
|
116 | 115 | if both_m: |
|
117 | 116 | nerr = int(both_m.group(1)) |
|
118 | 117 | nfail = int(both_m.group(2)) |
|
119 | 118 | return nerr, nfail |
|
120 | 119 | |
|
121 | 120 | # If the input didn't match any of these forms, assume no error/failures |
|
122 | 121 | return 0, 0 |
|
123 | 122 | |
|
124 | 123 | |
|
125 | 124 | # So nose doesn't think this is a test |
|
126 | 125 | parse_test_output.__test__ = False |
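
A quick usage sketch of parse_test_output (assuming the IPython.testing.tools import path this file lives under); the summary text is invented but matches the FAILED forms listed in the docstring.

from IPython.testing.tools import parse_test_output

summary = "Ran 3 tests in 0.012s\n\nFAILED (errors=1, failures=2)\n"
nerr, nfail = parse_test_output(summary)
assert (nerr, nfail) == (1, 2)

# Output matching none of the FAILED forms is reported as clean:
assert parse_test_output("Ran 3 tests in 0.012s\n\nOK\n") == (0, 0)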
|
127 | 126 | |
|
128 | 127 | |
|
129 | 128 | def default_argv(): |
|
130 | 129 | """Return a valid default argv for creating testing instances of ipython""" |
|
131 | 130 | |
|
132 | 131 | return ['--quick', # so no config file is loaded |
|
133 | 132 | # Other defaults to minimize side effects on stdout |
|
134 | 133 | '--colors=NoColor', '--no-term-title','--no-banner', |
|
135 | 134 | '--autocall=0'] |
|
136 | 135 | |
|
137 | 136 | |
|
138 | 137 | def default_config(): |
|
139 | 138 | """Return a config object with good defaults for testing.""" |
|
140 | 139 | config = Config() |
|
141 | 140 | config.TerminalInteractiveShell.colors = 'NoColor' |
|
142 | 141 | config.TerminalInteractiveShell.term_title = False |
|
143 | 142 | config.TerminalInteractiveShell.autocall = 0 |
|
144 | 143 | f = tempfile.NamedTemporaryFile(suffix=u'test_hist.sqlite', delete=False) |
|
145 | 144 | config.HistoryManager.hist_file = f.name |
|
146 | 145 | f.close() |
|
147 | 146 | config.HistoryManager.db_cache_size = 10000 |
|
148 | 147 | return config |
|
149 | 148 | |
|
150 | 149 | |
|
151 | 150 | def get_ipython_cmd(as_string=False): |
|
152 | 151 | """ |
|
153 | 152 | Return appropriate IPython command line name. By default, this will return |
|
154 | 153 | a list that can be used with subprocess.Popen, for example, but passing |
|
155 | 154 | `as_string=True` allows for returning the IPython command as a string. |
|
156 | 155 | |
|
157 | 156 | Parameters |
|
158 | 157 | ---------- |
|
159 | 158 | as_string: bool |
|
160 | 159 | Flag to return the command as a single string rather than a list. |
|
161 | 160 | """ |
|
162 | 161 | ipython_cmd = [sys.executable, "-m", "IPython"] |
|
163 | 162 | |
|
164 | 163 | if as_string: |
|
165 | 164 | ipython_cmd = " ".join(ipython_cmd) |
|
166 | 165 | |
|
167 | 166 | return ipython_cmd |
|
168 | 167 | |
|
169 | 168 | def ipexec(fname, options=None, commands=()): |
|
170 | 169 | """Utility to call 'ipython filename'. |
|
171 | 170 | |
|
172 | 171 | Starts IPython with a minimal and safe configuration to make startup as fast |
|
173 | 172 | as possible. |
|
174 | 173 | |
|
175 | 174 | Note that this starts IPython in a subprocess! |
|
176 | 175 | |
|
177 | 176 | Parameters |
|
178 | 177 | ---------- |
|
179 | 178 | fname : str |
|
180 | 179 | Name of file to be executed (should have .py or .ipy extension). |
|
181 | 180 | |
|
182 | 181 | options : optional, list |
|
183 | 182 | Extra command-line flags to be passed to IPython. |
|
184 | 183 | |
|
185 | 184 | commands : optional, list |
|
186 | 185 | Commands to send in on stdin |
|
187 | 186 | |
|
188 | 187 | Returns |
|
189 | 188 | ------- |
|
190 | 189 | (stdout, stderr) of ipython subprocess. |
|
191 | 190 | """ |
|
192 | 191 | if options is None: options = [] |
|
193 | 192 | |
|
194 | 193 | cmdargs = default_argv() + options |
|
195 | 194 | |
|
196 | 195 | test_dir = os.path.dirname(__file__) |
|
197 | 196 | |
|
198 | 197 | ipython_cmd = get_ipython_cmd() |
|
199 | 198 | # Absolute path for filename |
|
200 | 199 | full_fname = os.path.join(test_dir, fname) |
|
201 | 200 | full_cmd = ipython_cmd + cmdargs + [full_fname] |
|
202 | 201 | env = os.environ.copy() |
|
203 | 202 | # FIXME: ignore all warnings in ipexec while we have shims |
|
204 | 203 | # should we keep suppressing warnings here, even after removing shims? |
|
205 | 204 | env['PYTHONWARNINGS'] = 'ignore' |
|
206 | 205 | # env.pop('PYTHONWARNINGS', None) # Avoid extraneous warnings appearing on stderr |
|
207 | 206 | for k, v in env.items(): |
|
208 | 207 | # Debug a bizarre failure we've seen on Windows: |
|
209 | 208 | # TypeError: environment can only contain strings |
|
210 | 209 | if not isinstance(v, str): |
|
211 | 210 | print(k, v) |
|
212 | 211 | p = Popen(full_cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE, env=env) |
|
213 | out, err = p.communicate(input=py3compat. |

214 | out, err = py3compat. |

212 | out, err = p.communicate(input=py3compat.encode('\n'.join(commands)) or None) | |
|
213 | out, err = py3compat.decode(out), py3compat.decode(err) | |
|
215 | 214 | # `import readline` causes 'ESC[?1034h' to be output sometimes, |
|
216 | 215 | # so strip that out before doing comparisons |
|
217 | 216 | if out: |
|
218 | 217 | out = re.sub(r'\x1b\[[^h]+h', '', out) |
|
219 | 218 | return out, err |
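
Because ipexec really launches a subprocess, a call looks like the sketch below. 'simple.py' is a hypothetical script assumed to sit next to this module, since the name is resolved relative to os.path.dirname(__file__).

from IPython.testing.tools import ipexec

# Runs `python -m IPython <default argv> IPython/testing/simple.py` and
# returns its decoded output streams.
out, err = ipexec('simple.py')
print(out)   # stdout of the IPython subprocess, readline escape codes stripped
print(err)   # stderr; ipexec_validate() below treats unexpected stderr as an error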
|
220 | 219 | |
|
221 | 220 | |
|
222 | 221 | def ipexec_validate(fname, expected_out, expected_err='', |
|
223 | 222 | options=None, commands=()): |
|
224 | 223 | """Utility to call 'ipython filename' and validate output/error. |
|
225 | 224 | |
|
226 | 225 | This function raises an AssertionError if the validation fails. |
|
227 | 226 | |
|
228 | 227 | Note that this starts IPython in a subprocess! |
|
229 | 228 | |
|
230 | 229 | Parameters |
|
231 | 230 | ---------- |
|
232 | 231 | fname : str |
|
233 | 232 | Name of the file to be executed (should have .py or .ipy extension). |
|
234 | 233 | |
|
235 | 234 | expected_out : str |
|
236 | 235 | Expected stdout of the process. |
|
237 | 236 | |
|
238 | 237 | expected_err : optional, str |
|
239 | 238 | Expected stderr of the process. |
|
240 | 239 | |
|
241 | 240 | options : optional, list |
|
242 | 241 | Extra command-line flags to be passed to IPython. |
|
243 | 242 | |
|
244 | 243 | Returns |
|
245 | 244 | ------- |
|
246 | 245 | None |
|
247 | 246 | """ |
|
248 | 247 | |
|
249 | 248 | import nose.tools as nt |
|
250 | 249 | |
|
251 | 250 | out, err = ipexec(fname, options, commands) |
|
252 | 251 | #print 'OUT', out # dbg |
|
253 | 252 | #print 'ERR', err # dbg |
|
254 | 253 | # If there are any errors, we must check those before stdout, as they may be |
|
255 | 254 | # more informative than simply having an empty stdout. |
|
256 | 255 | if err: |
|
257 | 256 | if expected_err: |
|
258 | 257 | nt.assert_equal("\n".join(err.strip().splitlines()), "\n".join(expected_err.strip().splitlines())) |
|
259 | 258 | else: |
|
260 | 259 | raise ValueError('Running file %r produced error: %r' % |
|
261 | 260 | (fname, err)) |
|
262 | 261 | # If no errors or output on stderr was expected, match stdout |
|
263 | 262 | nt.assert_equal("\n".join(out.strip().splitlines()), "\n".join(expected_out.strip().splitlines())) |
|
264 | 263 | |
|
265 | 264 | |
|
266 | 265 | class TempFileMixin(object): |
|
267 | 266 | """Utility class to create temporary Python/IPython files. |
|
268 | 267 | |
|
269 | 268 | Meant as a mixin class for test cases.""" |
|
270 | 269 | |
|
271 | 270 | def mktmp(self, src, ext='.py'): |
|
272 | 271 | """Make a valid python temp file.""" |
|
273 | 272 | fname, f = temp_pyfile(src, ext) |
|
274 | 273 | self.tmpfile = f |
|
275 | 274 | self.fname = fname |
|
276 | 275 | |
|
277 | 276 | def tearDown(self): |
|
278 | 277 | if hasattr(self, 'tmpfile'): |
|
279 | 278 | # If the tmpfile wasn't made because of skipped tests, like in |
|
280 | 279 | # win32, there's nothing to cleanup. |
|
281 | 280 | self.tmpfile.close() |
|
282 | 281 | try: |
|
283 | 282 | os.unlink(self.fname) |
|
284 | 283 | except: |
|
285 | 284 | # On Windows, even though we close the file, we still can't |
|
286 | 285 | # delete it. I have no clue why |
|
287 | 286 | pass |
|
288 | 287 | |
|
289 | 288 | def __enter__(self): |
|
290 | 289 | return self |
|
291 | 290 | |
|
292 | 291 | def __exit__(self, exc_type, exc_value, traceback): |
|
293 | 292 | self.tearDown() |
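
A minimal sketch of how TempFileMixin is meant to be combined with a TestCase; the test body and the source it writes are invented.

import unittest
from IPython.testing.tools import TempFileMixin

class TestWithTmpScript(TempFileMixin, unittest.TestCase):
    def test_mktmp(self):
        # Writes the source to a real temporary .py file and records its path.
        self.mktmp("x = 40 + 2\n")
        ns = {}
        with open(self.fname) as f:
            exec(f.read(), ns)
        self.assertEqual(ns['x'], 42)
        # tearDown() (also reachable via the `with` protocol) removes self.fname.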
|
294 | 293 | |
|
295 | 294 | |
|
296 | 295 | pair_fail_msg = ("Testing {0}\n\n" |
|
297 | 296 | "In:\n" |
|
298 | 297 | " {1!r}\n" |
|
299 | 298 | "Expected:\n" |
|
300 | 299 | " {2!r}\n" |
|
301 | 300 | "Got:\n" |
|
302 | 301 | " {3!r}\n") |
|
303 | 302 | def check_pairs(func, pairs): |
|
304 | 303 | """Utility function for the common case of checking a function with a |
|
305 | 304 | sequence of input/output pairs. |
|
306 | 305 | |
|
307 | 306 | Parameters |
|
308 | 307 | ---------- |
|
309 | 308 | func : callable |
|
310 | 309 | The function to be tested. Should accept a single argument. |
|
311 | 310 | pairs : iterable |
|
312 | 311 | A list of (input, expected_output) tuples. |
|
313 | 312 | |
|
314 | 313 | Returns |
|
315 | 314 | ------- |
|
316 | 315 | None. Raises an AssertionError if any output does not match the expected |
|
317 | 316 | value. |
|
318 | 317 | """ |
|
319 | 318 | name = getattr(func, "func_name", getattr(func, "__name__", "<unknown>")) |
|
320 | 319 | for inp, expected in pairs: |
|
321 | 320 | out = func(inp) |
|
322 | 321 | assert out == expected, pair_fail_msg.format(name, inp, expected, out) |
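
check_pairs in one line, with an invented function under test:

from IPython.testing.tools import check_pairs

# Each (input, expected_output) pair is pushed through the function; a
# mismatch raises AssertionError formatted with pair_fail_msg above.
check_pairs(str.upper, [("abc", "ABC"), ("x1", "X1")])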
|
323 | 322 | |
|
324 | 323 | |
|
325 | 324 | MyStringIO = StringIO |
|
326 | 325 | |
|
327 | 326 | _re_type = type(re.compile(r'')) |
|
328 | 327 | |
|
329 | 328 | notprinted_msg = """Did not find {0!r} in printed output (on {1}): |
|
330 | 329 | ------- |
|
331 | 330 | {2!s} |
|
332 | 331 | ------- |
|
333 | 332 | """ |
|
334 | 333 | |
|
335 | 334 | class AssertPrints(object): |
|
336 | 335 | """Context manager for testing that code prints certain text. |
|
337 | 336 | |
|
338 | 337 | Examples |
|
339 | 338 | -------- |
|
340 | 339 | >>> with AssertPrints("abc", suppress=False): |
|
341 | 340 | ... print("abcd") |
|
342 | 341 | ... print("def") |
|
343 | 342 | ... |
|
344 | 343 | abcd |
|
345 | 344 | def |
|
346 | 345 | """ |
|
347 | 346 | def __init__(self, s, channel='stdout', suppress=True): |
|
348 | 347 | self.s = s |
|
349 | 348 | if isinstance(self.s, (str, _re_type)): |
|
350 | 349 | self.s = [self.s] |
|
351 | 350 | self.channel = channel |
|
352 | 351 | self.suppress = suppress |
|
353 | 352 | |
|
354 | 353 | def __enter__(self): |
|
355 | 354 | self.orig_stream = getattr(sys, self.channel) |
|
356 | 355 | self.buffer = MyStringIO() |
|
357 | 356 | self.tee = Tee(self.buffer, channel=self.channel) |
|
358 | 357 | setattr(sys, self.channel, self.buffer if self.suppress else self.tee) |
|
359 | 358 | |
|
360 | 359 | def __exit__(self, etype, value, traceback): |
|
361 | 360 | try: |
|
362 | 361 | if value is not None: |
|
363 | 362 | # If an error was raised, don't check anything else |
|
364 | 363 | return False |
|
365 | 364 | self.tee.flush() |
|
366 | 365 | setattr(sys, self.channel, self.orig_stream) |
|
367 | 366 | printed = self.buffer.getvalue() |
|
368 | 367 | for s in self.s: |
|
369 | 368 | if isinstance(s, _re_type): |
|
370 | 369 | assert s.search(printed), notprinted_msg.format(s.pattern, self.channel, printed) |
|
371 | 370 | else: |
|
372 | 371 | assert s in printed, notprinted_msg.format(s, self.channel, printed) |
|
373 | 372 | return False |
|
374 | 373 | finally: |
|
375 | 374 | self.tee.close() |
|
376 | 375 | |
|
377 | 376 | printed_msg = """Found {0!r} in printed output (on {1}): |
|
378 | 377 | ------- |
|
379 | 378 | {2!s} |
|
380 | 379 | ------- |
|
381 | 380 | """ |
|
382 | 381 | |
|
383 | 382 | class AssertNotPrints(AssertPrints): |
|
384 | 383 | """Context manager for checking that certain output *isn't* produced. |
|
385 | 384 | |
|
386 | 385 | Counterpart of AssertPrints""" |
|
387 | 386 | def __exit__(self, etype, value, traceback): |
|
388 | 387 | try: |
|
389 | 388 | if value is not None: |
|
390 | 389 | # If an error was raised, don't check anything else |
|
391 | 390 | self.tee.close() |
|
392 | 391 | return False |
|
393 | 392 | self.tee.flush() |
|
394 | 393 | setattr(sys, self.channel, self.orig_stream) |
|
395 | 394 | printed = self.buffer.getvalue() |
|
396 | 395 | for s in self.s: |
|
397 | 396 | if isinstance(s, _re_type): |
|
398 | 397 | assert not s.search(printed),printed_msg.format( |
|
399 | 398 | s.pattern, self.channel, printed) |
|
400 | 399 | else: |
|
401 | 400 | assert s not in printed, printed_msg.format( |
|
402 | 401 | s, self.channel, printed) |
|
403 | 402 | return False |
|
404 | 403 | finally: |
|
405 | 404 | self.tee.close() |
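
AssertNotPrints has no doctest of its own, so a small usage sketch (the printed text is invented):

from IPython.testing.tools import AssertNotPrints

# Passes: the forbidden text never shows up on stdout inside the block.
with AssertNotPrints("Traceback"):
    print("all tests passed")

# If the block did print a line containing "Traceback", __exit__ would raise
# AssertionError carrying the captured output, mirroring AssertPrints above.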
|
406 | 405 | |
|
407 | 406 | @contextmanager |
|
408 | 407 | def mute_warn(): |
|
409 | 408 | from IPython.utils import warn |
|
410 | 409 | save_warn = warn.warn |
|
411 | 410 | warn.warn = lambda *a, **kw: None |
|
412 | 411 | try: |
|
413 | 412 | yield |
|
414 | 413 | finally: |
|
415 | 414 | warn.warn = save_warn |
|
416 | 415 | |
|
417 | 416 | @contextmanager |
|
418 | 417 | def make_tempfile(name): |
|
419 | 418 | """ Create an empty, named, temporary file for the duration of the context. |
|
420 | 419 | """ |
|
421 | 420 | f = open(name, 'w') |
|
422 | 421 | f.close() |
|
423 | 422 | try: |
|
424 | 423 | yield |
|
425 | 424 | finally: |
|
426 | 425 | os.unlink(name) |
|
427 | 426 | |
|
428 | 427 | def fake_input(inputs): |
|
429 | 428 | """Temporarily replace the input() function to return the given values |
|
430 | 429 | |
|
431 | 430 | Use as a context manager: |
|
432 | 431 | |
|
433 | 432 | with fake_input(['result1', 'result2']): |
|
434 | 433 | ... |
|
435 | 434 | |
|
436 | 435 | Values are returned in order. If input() is called again after the last value |
|
437 | 436 | was used, EOFError is raised. |
|
438 | 437 | """ |
|
439 | 438 | it = iter(inputs) |
|
440 | 439 | def mock_input(prompt=''): |
|
441 | 440 | try: |
|
442 | 441 | return next(it) |
|
443 | 442 | except StopIteration: |
|
444 | 443 | raise EOFError('No more inputs given') |
|
445 | 444 | |
|
446 | 445 | return patch('builtins.input', mock_input) |
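
A sketch of fake_input inside a test, with invented prompts and answers:

from IPython.testing.tools import fake_input

def ask_user():
    return input("name? ") + "/" + input("language? ")

with fake_input(["ada", "python"]):
    assert ask_user() == "ada/python"
# A third input() call inside the block would raise EOFError, as documented.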
|
447 | 446 | |
|
448 | 447 | def help_output_test(subcommand=''): |
|
449 | 448 | """test that `ipython [subcommand] -h` works""" |
|
450 | 449 | cmd = get_ipython_cmd() + [subcommand, '-h'] |
|
451 | 450 | out, err, rc = get_output_error_code(cmd) |
|
452 | 451 | nt.assert_equal(rc, 0, err) |
|
453 | 452 | nt.assert_not_in("Traceback", err) |
|
454 | 453 | nt.assert_in("Options", out) |
|
455 | 454 | nt.assert_in("--help-all", out) |
|
456 | 455 | return out, err |
|
457 | 456 | |
|
458 | 457 | |
|
459 | 458 | def help_all_output_test(subcommand=''): |
|
460 | 459 | """test that `ipython [subcommand] --help-all` works""" |
|
461 | 460 | cmd = get_ipython_cmd() + [subcommand, '--help-all'] |
|
462 | 461 | out, err, rc = get_output_error_code(cmd) |
|
463 | 462 | nt.assert_equal(rc, 0, err) |
|
464 | 463 | nt.assert_not_in("Traceback", err) |
|
465 | 464 | nt.assert_in("Options", out) |
|
466 | 465 | nt.assert_in("Class", out) |
|
467 | 466 | return out, err |
|
468 | 467 |
@@ -1,78 +1,78 b'' | |||
|
1 | 1 | """cli-specific implementation of process utilities. |
|
2 | 2 | |
|
3 | 3 | cli - Common Language Infrastructure for IronPython. Code |
|
4 | 4 | can run on any operating system. Check os.name for os- |
|
5 | 5 | specific settings. |
|
6 | 6 | |
|
7 | 7 | This file is only meant to be imported by process.py, not by end-users. |
|
8 | 8 | |
|
9 | 9 | This file is largely untested. Making it a full drop-in process |

10 | 10 | interface for IronPython will probably require contributors to |

11 | 11 | help fill in the details. |
|
12 | 12 | """ |
|
13 | 13 | |
|
14 | 14 | # Import cli libraries: |
|
15 | 15 | import clr |
|
16 | 16 | import System |
|
17 | 17 | |
|
18 | 18 | # Import Python libraries: |
|
19 | 19 | import os |
|
20 | 20 | |
|
21 | 21 | # Import IPython libraries: |
|
22 | 22 | from IPython.utils import py3compat |
|
23 | 23 | from ._process_common import arg_split |
|
24 | 24 | |
|
25 | 25 | def _find_cmd(cmd): |
|
26 | 26 | """Find the full path to a command using which.""" |
|
27 | 27 | paths = System.Environment.GetEnvironmentVariable("PATH").Split(os.pathsep) |
|
28 | 28 | for path in paths: |
|
29 | 29 | filename = os.path.join(path, cmd) |
|
30 | 30 | if System.IO.File.Exists(filename): |
|
31 | return py3compat. |

31 | return py3compat.decode(filename) | |
|
32 | 32 | raise OSError("command %r not found" % cmd) |
|
33 | 33 | |
|
34 | 34 | def system(cmd): |
|
35 | 35 | """ |
|
36 | 36 | system(cmd) should work in a cli environment on Mac OSX, Linux, |
|
37 | 37 | and Windows |
|
38 | 38 | """ |
|
39 | 39 | psi = System.Diagnostics.ProcessStartInfo(cmd) |
|
40 | 40 | psi.RedirectStandardOutput = True |
|
41 | 41 | psi.RedirectStandardError = True |
|
42 | 42 | psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal |
|
43 | 43 | psi.UseShellExecute = False |
|
44 | 44 | # Start up process: |
|
45 | 45 | reg = System.Diagnostics.Process.Start(psi) |
|
46 | 46 | |
|
47 | 47 | def getoutput(cmd): |
|
48 | 48 | """ |
|
49 | 49 | getoutput(cmd) should work in a cli environment on Mac OSX, Linux, |
|
50 | 50 | and Windows |
|
51 | 51 | """ |
|
52 | 52 | psi = System.Diagnostics.ProcessStartInfo(cmd) |
|
53 | 53 | psi.RedirectStandardOutput = True |
|
54 | 54 | psi.RedirectStandardError = True |
|
55 | 55 | psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal |
|
56 | 56 | psi.UseShellExecute = False |
|
57 | 57 | # Start up process: |
|
58 | 58 | reg = System.Diagnostics.Process.Start(psi) |
|
59 | 59 | myOutput = reg.StandardOutput |
|
60 | 60 | output = myOutput.ReadToEnd() |
|
61 | 61 | myError = reg.StandardError |
|
62 | 62 | error = myError.ReadToEnd() |
|
63 | 63 | return output |
|
64 | 64 | |
|
65 | 65 | def check_pid(pid): |
|
66 | 66 | """ |
|
67 | 67 | Check if a process with the given PID (pid) exists |
|
68 | 68 | """ |
|
69 | 69 | try: |
|
70 | 70 | System.Diagnostics.Process.GetProcessById(pid) |
|
71 | 71 | # process with given pid is running |
|
72 | 72 | return True |
|
73 | 73 | except System.InvalidOperationException: |
|
74 | 74 | # process wasn't started by this object (but is running) |
|
75 | 75 | return True |
|
76 | 76 | except System.ArgumentException: |
|
77 | 77 | # process with given pid isn't running |
|
78 | 78 | return False |
@@ -1,212 +1,212 b'' | |||
|
1 | 1 | """Common utilities for the various process_* implementations. |
|
2 | 2 | |
|
3 | 3 | This file is only meant to be imported by the platform-specific implementations |
|
4 | 4 | of subprocess utilities, and it contains tools that are common to all of them. |
|
5 | 5 | """ |
|
6 | 6 | |
|
7 | 7 | #----------------------------------------------------------------------------- |
|
8 | 8 | # Copyright (C) 2010-2011 The IPython Development Team |
|
9 | 9 | # |
|
10 | 10 | # Distributed under the terms of the BSD License. The full license is in |
|
11 | 11 | # the file COPYING, distributed as part of this software. |
|
12 | 12 | #----------------------------------------------------------------------------- |
|
13 | 13 | |
|
14 | 14 | #----------------------------------------------------------------------------- |
|
15 | 15 | # Imports |
|
16 | 16 | #----------------------------------------------------------------------------- |
|
17 | 17 | import subprocess |
|
18 | 18 | import shlex |
|
19 | 19 | import sys |
|
20 | 20 | import os |
|
21 | 21 | |
|
22 | 22 | from IPython.utils import py3compat |
|
23 | 23 | |
|
24 | 24 | #----------------------------------------------------------------------------- |
|
25 | 25 | # Function definitions |
|
26 | 26 | #----------------------------------------------------------------------------- |
|
27 | 27 | |
|
28 | 28 | def read_no_interrupt(p): |
|
29 | 29 | """Read from a pipe ignoring EINTR errors. |
|
30 | 30 | |
|
31 | 31 | This is necessary because when reading from pipes with GUI event loops |
|
32 | 32 | running in the background, often interrupts are raised that stop the |
|
33 | 33 | command from completing.""" |
|
34 | 34 | import errno |
|
35 | 35 | |
|
36 | 36 | try: |
|
37 | 37 | return p.read() |
|
38 | 38 | except IOError as err: |
|
39 | 39 | if err.errno != errno.EINTR: |
|
40 | 40 | raise |
|
41 | 41 | |
|
42 | 42 | |
|
43 | 43 | def process_handler(cmd, callback, stderr=subprocess.PIPE): |
|
44 | 44 | """Open a command in a shell subprocess and execute a callback. |
|
45 | 45 | |
|
46 | 46 | This function provides common scaffolding for creating subprocess.Popen() |
|
47 | 47 | calls. It creates a Popen object and then calls the callback with it. |
|
48 | 48 | |
|
49 | 49 | Parameters |
|
50 | 50 | ---------- |
|
51 | 51 | cmd : str or list |
|
52 | 52 | A command to be executed by the system, using :class:`subprocess.Popen`. |
|
53 | 53 | If a string is passed, it will be run in the system shell. If a list is |
|
54 | 54 | passed, it will be used directly as arguments. |
|
55 | 55 | |
|
56 | 56 | callback : callable |
|
57 | 57 | A one-argument function that will be called with the Popen object. |
|
58 | 58 | |
|
59 | 59 | stderr : file descriptor number, optional |
|
60 | 60 | By default this is set to ``subprocess.PIPE``, but you can also pass the |
|
61 | 61 | value ``subprocess.STDOUT`` to force the subprocess' stderr to go into |
|
62 | 62 | the same file descriptor as its stdout. This is useful to read stdout |
|
63 | 63 | and stderr combined in the order they are generated. |
|
64 | 64 | |
|
65 | 65 | Returns |
|
66 | 66 | ------- |
|
67 | 67 | The return value of the provided callback is returned. |
|
68 | 68 | """ |
|
69 | 69 | sys.stdout.flush() |
|
70 | 70 | sys.stderr.flush() |
|
71 | 71 | # On win32, close_fds can't be true when using pipes for stdin/out/err |
|
72 | 72 | close_fds = sys.platform != 'win32' |
|
73 | 73 | # Determine if cmd should be run with system shell. |
|
74 | 74 | shell = isinstance(cmd, str) |
|
75 | 75 | # On POSIX systems run shell commands with user-preferred shell. |
|
76 | 76 | executable = None |
|
77 | 77 | if shell and os.name == 'posix' and 'SHELL' in os.environ: |
|
78 | 78 | executable = os.environ['SHELL'] |
|
79 | 79 | p = subprocess.Popen(cmd, shell=shell, |
|
80 | 80 | executable=executable, |
|
81 | 81 | stdin=subprocess.PIPE, |
|
82 | 82 | stdout=subprocess.PIPE, |
|
83 | 83 | stderr=stderr, |
|
84 | 84 | close_fds=close_fds) |
|
85 | 85 | |
|
86 | 86 | try: |
|
87 | 87 | out = callback(p) |
|
88 | 88 | except KeyboardInterrupt: |
|
89 | 89 | print('^C') |
|
90 | 90 | sys.stdout.flush() |
|
91 | 91 | sys.stderr.flush() |
|
92 | 92 | out = None |
|
93 | 93 | finally: |
|
94 | 94 | # Make really sure that we don't leave processes behind, in case the |
|
95 | 95 | # call above raises an exception |
|
96 | 96 | # We start by assuming the subprocess finished (to avoid NameErrors |
|
97 | 97 | # later depending on the path taken) |
|
98 | 98 | if p.returncode is None: |
|
99 | 99 | try: |
|
100 | 100 | p.terminate() |
|
101 | 101 | p.poll() |
|
102 | 102 | except OSError: |
|
103 | 103 | pass |
|
104 | 104 | # One last try on our way out |
|
105 | 105 | if p.returncode is None: |
|
106 | 106 | try: |
|
107 | 107 | p.kill() |
|
108 | 108 | except OSError: |
|
109 | 109 | pass |
|
110 | 110 | |
|
111 | 111 | return out |
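
The callback contract is the key point of process_handler, so a sketch (the command string is invented): the callback receives the live Popen object and its return value is handed straight back to the caller.

import subprocess
from IPython.utils._process_common import process_handler

# Collect combined stdout/stderr, the same way getoutput() below does.
out = process_handler('echo hello', lambda p: p.communicate()[0],
                      stderr=subprocess.STDOUT)
print(out)   # b'hello\n' on a typical POSIX shell; decoding is left to the caller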
|
112 | 112 | |
|
113 | 113 | |
|
114 | 114 | def getoutput(cmd): |
|
115 | 115 | """Run a command and return its stdout/stderr as a string. |
|
116 | 116 | |
|
117 | 117 | Parameters |
|
118 | 118 | ---------- |
|
119 | 119 | cmd : str or list |
|
120 | 120 | A command to be executed in the system shell. |
|
121 | 121 | |
|
122 | 122 | Returns |
|
123 | 123 | ------- |
|
124 | 124 | output : str |
|
125 | 125 | A string containing the combination of stdout and stderr from the |
|
126 | 126 | subprocess, in whatever order the subprocess originally wrote to its |
|
127 | 127 | file descriptors (so the order of the information in this string is the |
|
128 | 128 | correct order as would be seen if running the command in a terminal). |
|
129 | 129 | """ |
|
130 | 130 | out = process_handler(cmd, lambda p: p.communicate()[0], subprocess.STDOUT) |
|
131 | 131 | if out is None: |
|
132 | 132 | return '' |
|
133 | return py3compat. |

133 | return py3compat.decode(out) | |
|
134 | 134 | |
|
135 | 135 | |
|
136 | 136 | def getoutputerror(cmd): |
|
137 | 137 | """Return (standard output, standard error) of executing cmd in a shell. |
|
138 | 138 | |
|
139 | 139 | Accepts the same arguments as os.system(). |
|
140 | 140 | |
|
141 | 141 | Parameters |
|
142 | 142 | ---------- |
|
143 | 143 | cmd : str or list |
|
144 | 144 | A command to be executed in the system shell. |
|
145 | 145 | |
|
146 | 146 | Returns |
|
147 | 147 | ------- |
|
148 | 148 | stdout : str |
|
149 | 149 | stderr : str |
|
150 | 150 | """ |
|
151 | 151 | return get_output_error_code(cmd)[:2] |
|
152 | 152 | |
|
153 | 153 | def get_output_error_code(cmd): |
|
154 | 154 | """Return (standard output, standard error, return code) of executing cmd |
|
155 | 155 | in a shell. |
|
156 | 156 | |
|
157 | 157 | Accepts the same arguments as os.system(). |
|
158 | 158 | |
|
159 | 159 | Parameters |
|
160 | 160 | ---------- |
|
161 | 161 | cmd : str or list |
|
162 | 162 | A command to be executed in the system shell. |
|
163 | 163 | |
|
164 | 164 | Returns |
|
165 | 165 | ------- |
|
166 | 166 | stdout : str |
|
167 | 167 | stderr : str |
|
168 | 168 | returncode: int |
|
169 | 169 | """ |
|
170 | 170 | |
|
171 | 171 | out_err, p = process_handler(cmd, lambda p: (p.communicate(), p)) |
|
172 | 172 | if out_err is None: |
|
173 | 173 | return '', '', p.returncode |
|
174 | 174 | out, err = out_err |
|
175 | return py3compat. |

175 | return py3compat.decode(out), py3compat.decode(err), p.returncode | |
|
176 | 176 | |
|
177 | 177 | def arg_split(s, posix=False, strict=True): |
|
178 | 178 | """Split a command line's arguments in a shell-like manner. |
|
179 | 179 | |
|
180 | 180 | This is a modified version of the standard library's shlex.split() |
|
181 | 181 | function, but with a default of posix=False for splitting, so that quotes |
|
182 | 182 | in inputs are respected. |
|
183 | 183 | |
|
184 | 184 | if strict=False, then any errors shlex.split would raise will result in the |
|
185 | 185 | unparsed remainder being the last element of the list, rather than raising. |
|
186 | 186 | This is because we sometimes use arg_split to parse things other than |
|
187 | 187 | command-line args. |
|
188 | 188 | """ |
|
189 | 189 | |
|
190 | 190 | lex = shlex.shlex(s, posix=posix) |
|
191 | 191 | lex.whitespace_split = True |
|
192 | 192 | # Extract tokens, ensuring that things like leaving open quotes |
|
193 | 193 | # does not cause this to raise. This is important, because we |
|
194 | 194 | # sometimes pass Python source through this (e.g. %timeit f(" ")), |
|
195 | 195 | # and it shouldn't raise an exception. |
|
196 | 196 | # It may be a bad idea to parse things that are not command-line args |
|
197 | 197 | # through this function, but we do, so let's be safe about it. |
|
198 | 198 | lex.commenters='' #fix for GH-1269 |
|
199 | 199 | tokens = [] |
|
200 | 200 | while True: |
|
201 | 201 | try: |
|
202 | 202 | tokens.append(next(lex)) |
|
203 | 203 | except StopIteration: |
|
204 | 204 | break |
|
205 | 205 | except ValueError: |
|
206 | 206 | if strict: |
|
207 | 207 | raise |
|
208 | 208 | # couldn't parse, get remaining blob as last token |
|
209 | 209 | tokens.append(lex.token) |
|
210 | 210 | break |
|
211 | 211 | |
|
212 | 212 | return tokens |
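
The posix/strict switches are easiest to see side by side; a short sketch with invented command lines:

from IPython.utils._process_common import arg_split

arg_split('ls "my dir"')                 # ['ls', '"my dir"']  (quotes preserved)
arg_split('ls "my dir"', posix=True)     # ['ls', 'my dir']
# With strict=False an unterminated quote no longer raises ValueError; the
# unparsed remainder (here the trailing '"oops') becomes the last token.
arg_split('echo "oops', strict=False)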
@@ -1,224 +1,224 b'' | |||
|
1 | 1 | """Posix-specific implementation of process utilities. |
|
2 | 2 | |
|
3 | 3 | This file is only meant to be imported by process.py, not by end-users. |
|
4 | 4 | """ |
|
5 | 5 | |
|
6 | 6 | #----------------------------------------------------------------------------- |
|
7 | 7 | # Copyright (C) 2010-2011 The IPython Development Team |
|
8 | 8 | # |
|
9 | 9 | # Distributed under the terms of the BSD License. The full license is in |
|
10 | 10 | # the file COPYING, distributed as part of this software. |
|
11 | 11 | #----------------------------------------------------------------------------- |
|
12 | 12 | |
|
13 | 13 | #----------------------------------------------------------------------------- |
|
14 | 14 | # Imports |
|
15 | 15 | #----------------------------------------------------------------------------- |
|
16 | 16 | |
|
17 | 17 | # Stdlib |
|
18 | 18 | import errno |
|
19 | 19 | import os |
|
20 | 20 | import subprocess as sp |
|
21 | 21 | import sys |
|
22 | 22 | |
|
23 | 23 | import pexpect |
|
24 | 24 | |
|
25 | 25 | # Our own |
|
26 | 26 | from ._process_common import getoutput, arg_split |
|
27 | 27 | from IPython.utils import py3compat |
|
28 | 28 | from IPython.utils.encoding import DEFAULT_ENCODING |
|
29 | 29 | |
|
30 | 30 | #----------------------------------------------------------------------------- |
|
31 | 31 | # Function definitions |
|
32 | 32 | #----------------------------------------------------------------------------- |
|
33 | 33 | |
|
34 | 34 | def _find_cmd(cmd): |
|
35 | 35 | """Find the full path to a command using which.""" |
|
36 | 36 | |
|
37 | 37 | path = sp.Popen(['/usr/bin/env', 'which', cmd], |
|
38 | 38 | stdout=sp.PIPE, stderr=sp.PIPE).communicate()[0] |
|
39 | return py3compat. |

39 | return py3compat.decode(path) | |
|
40 | 40 | |
|
41 | 41 | |
|
42 | 42 | class ProcessHandler(object): |
|
43 | 43 | """Execute subprocesses under the control of pexpect. |
|
44 | 44 | """ |
|
45 | 45 | # Timeout in seconds to wait on each reading of the subprocess' output. |
|
46 | 46 | # This should not be set too low to avoid cpu overusage from our side, |
|
47 | 47 | # since we read in a loop whose period is controlled by this timeout. |
|
48 | 48 | read_timeout = 0.05 |
|
49 | 49 | |
|
50 | 50 | # Timeout to give a process if we receive SIGINT, between sending the |
|
51 | 51 | # SIGINT to the process and forcefully terminating it. |
|
52 | 52 | terminate_timeout = 0.2 |
|
53 | 53 | |
|
54 | 54 | # File object where stdout and stderr of the subprocess will be written |
|
55 | 55 | logfile = None |
|
56 | 56 | |
|
57 | 57 | # Shell to call for subprocesses to execute |
|
58 | 58 | _sh = None |
|
59 | 59 | |
|
60 | 60 | @property |
|
61 | 61 | def sh(self): |
|
62 | 62 | if self._sh is None: |
|
63 | 63 | self._sh = pexpect.which('sh') |
|
64 | 64 | if self._sh is None: |
|
65 | 65 | raise OSError('"sh" shell not found') |
|
66 | 66 | |
|
67 | 67 | return self._sh |
|
68 | 68 | |
|
69 | 69 | def __init__(self, logfile=None, read_timeout=None, terminate_timeout=None): |
|
70 | 70 | """Arguments are used for pexpect calls.""" |
|
71 | 71 | self.read_timeout = (ProcessHandler.read_timeout if read_timeout is |
|
72 | 72 | None else read_timeout) |
|
73 | 73 | self.terminate_timeout = (ProcessHandler.terminate_timeout if |
|
74 | 74 | terminate_timeout is None else |
|
75 | 75 | terminate_timeout) |
|
76 | 76 | self.logfile = sys.stdout if logfile is None else logfile |
|
77 | 77 | |
|
78 | 78 | def getoutput(self, cmd): |
|
79 | 79 | """Run a command and return its stdout/stderr as a string. |
|
80 | 80 | |
|
81 | 81 | Parameters |
|
82 | 82 | ---------- |
|
83 | 83 | cmd : str |
|
84 | 84 | A command to be executed in the system shell. |
|
85 | 85 | |
|
86 | 86 | Returns |
|
87 | 87 | ------- |
|
88 | 88 | output : str |
|
89 | 89 | A string containing the combination of stdout and stderr from the |
|
90 | 90 | subprocess, in whatever order the subprocess originally wrote to its |
|
91 | 91 | file descriptors (so the order of the information in this string is the |
|
92 | 92 | correct order as would be seen if running the command in a terminal). |
|
93 | 93 | """ |
|
94 | 94 | try: |
|
95 | 95 | return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n') |
|
96 | 96 | except KeyboardInterrupt: |
|
97 | 97 | print('^C', file=sys.stderr, end='') |
|
98 | 98 | |
|
99 | 99 | def getoutput_pexpect(self, cmd): |
|
100 | 100 | """Run a command and return its stdout/stderr as a string. |
|
101 | 101 | |
|
102 | 102 | Parameters |
|
103 | 103 | ---------- |
|
104 | 104 | cmd : str |
|
105 | 105 | A command to be executed in the system shell. |
|
106 | 106 | |
|
107 | 107 | Returns |
|
108 | 108 | ------- |
|
109 | 109 | output : str |
|
110 | 110 | A string containing the combination of stdout and stderr from the |
|
111 | 111 | subprocess, in whatever order the subprocess originally wrote to its |
|
112 | 112 | file descriptors (so the order of the information in this string is the |
|
113 | 113 | correct order as would be seen if running the command in a terminal). |
|
114 | 114 | """ |
|
115 | 115 | try: |
|
116 | 116 | return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n') |
|
117 | 117 | except KeyboardInterrupt: |
|
118 | 118 | print('^C', file=sys.stderr, end='') |
|
119 | 119 | |
|
120 | 120 | def system(self, cmd): |
|
121 | 121 | """Execute a command in a subshell. |
|
122 | 122 | |
|
123 | 123 | Parameters |
|
124 | 124 | ---------- |
|
125 | 125 | cmd : str |
|
126 | 126 | A command to be executed in the system shell. |
|
127 | 127 | |
|
128 | 128 | Returns |
|
129 | 129 | ------- |
|
130 | 130 | int : child's exitstatus |
|
131 | 131 | """ |
|
132 | 132 | # Get likely encoding for the output. |
|
133 | 133 | enc = DEFAULT_ENCODING |
|
134 | 134 | |
|
135 | 135 | # Patterns to match on the output, for pexpect. We read input and |
|
136 | 136 | # allow either a short timeout or EOF |
|
137 | 137 | patterns = [pexpect.TIMEOUT, pexpect.EOF] |
|
138 | 138 | # the index of the EOF pattern in the list. |
|
139 | 139 | # even though we know it's 1, this call means we don't have to worry if |
|
140 | 140 | # we change the above list, and forget to change this value: |
|
141 | 141 | EOF_index = patterns.index(pexpect.EOF) |
|
142 | 142 | # The size of the output stored so far in the process output buffer. |
|
143 | 143 | # Since pexpect only appends to this buffer, each time we print we |
|
144 | 144 | # record how far we've printed, so that next time we only print *new* |
|
145 | 145 | # content from the buffer. |
|
146 | 146 | out_size = 0 |
|
147 | 147 | try: |
|
148 | 148 | # Since we're not really searching the buffer for text patterns, we |
|
149 | 149 | # can set pexpect's search window to be tiny and it won't matter. |
|
150 | 150 | # We only search for the 'patterns' timeout or EOF, which aren't in |
|
151 | 151 | # the text itself. |
|
152 | 152 | #child = pexpect.spawn(pcmd, searchwindowsize=1) |
|
153 | 153 | if hasattr(pexpect, 'spawnb'): |
|
154 | 154 | child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U |
|
155 | 155 | else: |
|
156 | 156 | child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect |
|
157 | 157 | flush = sys.stdout.flush |
|
158 | 158 | while True: |
|
159 | 159 | # res is the index of the pattern that caused the match, so we |
|
160 | 160 | # know whether we've finished (if we matched EOF) or not |
|
161 | 161 | res_idx = child.expect_list(patterns, self.read_timeout) |
|
162 | 162 | print(child.before[out_size:].decode(enc, 'replace'), end='') |
|
163 | 163 | flush() |
|
164 | 164 | if res_idx==EOF_index: |
|
165 | 165 | break |
|
166 | 166 | # Update the pointer to what we've already printed |
|
167 | 167 | out_size = len(child.before) |
|
168 | 168 | except KeyboardInterrupt: |
|
169 | 169 | # We need to send ^C to the process. The ascii code for '^C' is 3 |
|
170 | 170 | # (the character is known as ETX for 'End of Text', see |
|
171 | 171 | # curses.ascii.ETX). |
|
172 | 172 | child.sendline(chr(3)) |
|
173 | 173 | # Read and print any more output the program might produce on its |
|
174 | 174 | # way out. |
|
175 | 175 | try: |
|
176 | 176 | out_size = len(child.before) |
|
177 | 177 | child.expect_list(patterns, self.terminate_timeout) |
|
178 | 178 | print(child.before[out_size:].decode(enc, 'replace'), end='') |
|
179 | 179 | sys.stdout.flush() |
|
180 | 180 | except KeyboardInterrupt: |
|
181 | 181 | # Impatient users tend to type it multiple times |
|
182 | 182 | pass |
|
183 | 183 | finally: |
|
184 | 184 | # Ensure the subprocess really is terminated |
|
185 | 185 | child.terminate(force=True) |
|
186 | 186 | # add isalive check, to ensure exitstatus is set: |
|
187 | 187 | child.isalive() |
|
188 | 188 | |
|
189 | 189 | # We follow the subprocess pattern, returning either the exit status |
|
190 | 190 | # as a positive number, or the terminating signal as a negative |
|
191 | 191 | # number. |
|
192 | 192 | # on Linux, sh returns 128+n for signals terminating child processes |
|
193 | 193 | # on BSD (OS X), the signal code is set instead |
|
194 | 194 | if child.exitstatus is None: |
|
195 | 195 | # on WIFSIGNALED, pexpect sets signalstatus, leaving exitstatus=None |
|
196 | 196 | if child.signalstatus is None: |
|
197 | 197 | # this condition may never occur, |
|
198 | 198 | # but let's be certain we always return an integer. |
|
199 | 199 | return 0 |
|
200 | 200 | return -child.signalstatus |
|
201 | 201 | if child.exitstatus > 128: |
|
202 | 202 | return -(child.exitstatus - 128) |
|
203 | 203 | return child.exitstatus |
|
204 | 204 | |
|
205 | 205 | |
|
206 | 206 | # Expose system() through a functional interface for outside use. Note that we use |
|
207 | 207 | # getoutput() from the _common utils, which is built on top of popen(). Using |
|
208 | 208 | # pexpect to get subprocess output produces difficult to parse output, since |
|
209 | 209 | # programs think they are talking to a tty and produce highly formatted output |
|
210 | 210 | # (ls is a good example) that is hard to parse. |
|
211 | 211 | system = ProcessHandler().system |
|
212 | 212 | |
|
213 | 213 | def check_pid(pid): |
|
214 | 214 | try: |
|
215 | 215 | os.kill(pid, 0) |
|
216 | 216 | except OSError as err: |
|
217 | 217 | if err.errno == errno.ESRCH: |
|
218 | 218 | return False |
|
219 | 219 | elif err.errno == errno.EPERM: |
|
220 | 220 | # Don't have permission to signal the process - probably means it exists |
|
221 | 221 | return True |
|
222 | 222 | raise |
|
223 | 223 | else: |
|
224 | 224 | return True |
@@ -1,191 +1,191 b'' | |||
|
1 | 1 | """Windows-specific implementation of process utilities. |
|
2 | 2 | |
|
3 | 3 | This file is only meant to be imported by process.py, not by end-users. |
|
4 | 4 | """ |
|
5 | 5 | |
|
6 | 6 | #----------------------------------------------------------------------------- |
|
7 | 7 | # Copyright (C) 2010-2011 The IPython Development Team |
|
8 | 8 | # |
|
9 | 9 | # Distributed under the terms of the BSD License. The full license is in |
|
10 | 10 | # the file COPYING, distributed as part of this software. |
|
11 | 11 | #----------------------------------------------------------------------------- |
|
12 | 12 | |
|
13 | 13 | #----------------------------------------------------------------------------- |
|
14 | 14 | # Imports |
|
15 | 15 | #----------------------------------------------------------------------------- |
|
16 | 16 | |
|
17 | 17 | # stdlib |
|
18 | 18 | import os |
|
19 | 19 | import sys |
|
20 | 20 | import ctypes |
|
21 | 21 | |
|
22 | 22 | from ctypes import c_int, POINTER |
|
23 | 23 | from ctypes.wintypes import LPCWSTR, HLOCAL |
|
24 | 24 | from subprocess import STDOUT |
|
25 | 25 | |
|
26 | 26 | # our own imports |
|
27 | 27 | from ._process_common import read_no_interrupt, process_handler, arg_split as py_arg_split |
|
28 | 28 | from . import py3compat |
|
29 | 29 | from .encoding import DEFAULT_ENCODING |
|
30 | 30 | |
|
31 | 31 | #----------------------------------------------------------------------------- |
|
32 | 32 | # Function definitions |
|
33 | 33 | #----------------------------------------------------------------------------- |
|
34 | 34 | |
|
35 | 35 | class AvoidUNCPath(object): |
|
36 | 36 | """A context manager to protect command execution from UNC paths. |
|
37 | 37 | |
|
38 | 38 | In the Win32 API, commands can't be invoked with the cwd being a UNC path. |
|
39 | 39 | This context manager temporarily changes directory to the 'C:' drive on |
|
40 | 40 | entering, and restores the original working directory on exit. |
|
41 | 41 | |
|
42 | 42 | The context manager returns the starting working directory *if* it made a |
|
43 | 43 | change and None otherwise, so that users can apply the necessary adjustment |
|
44 | 44 | to their system calls in the event of a change. |
|
45 | 45 | |
|
46 | 46 | Examples |
|
47 | 47 | -------- |
|
48 | 48 | :: |
|
49 | 49 | cmd = 'dir' |
|
50 | 50 | with AvoidUNCPath() as path: |
|
51 | 51 | if path is not None: |
|
52 | 52 | cmd = '"pushd %s &&"%s' % (path, cmd) |
|
53 | 53 | os.system(cmd) |
|
54 | 54 | """ |
|
55 | 55 | def __enter__(self): |
|
56 | 56 | self.path = os.getcwd() |
|
57 | 57 | self.is_unc_path = self.path.startswith(r"\\") |
|
58 | 58 | if self.is_unc_path: |
|
59 | 59 | # change to c drive (as cmd.exe cannot handle UNC addresses) |
|
60 | 60 | os.chdir("C:") |
|
61 | 61 | return self.path |
|
62 | 62 | else: |
|
63 | 63 | # We return None to signal that there was no change in the working |
|
64 | 64 | # directory |
|
65 | 65 | return None |
|
66 | 66 | |
|
67 | 67 | def __exit__(self, exc_type, exc_value, traceback): |
|
68 | 68 | if self.is_unc_path: |
|
69 | 69 | os.chdir(self.path) |
|
70 | 70 | |
|
71 | 71 | |
|
72 | 72 | def _find_cmd(cmd): |
|
73 | 73 | """Find the full path to a .bat or .exe using the win32api module.""" |
|
74 | 74 | try: |
|
75 | 75 | from win32api import SearchPath |
|
76 | 76 | except ImportError: |
|
77 | 77 | raise ImportError('you need to have pywin32 installed for this to work') |
|
78 | 78 | else: |
|
79 | 79 | PATH = os.environ['PATH'] |
|
80 | 80 | extensions = ['.exe', '.com', '.bat', '.py'] |
|
81 | 81 | path = None |
|
82 | 82 | for ext in extensions: |
|
83 | 83 | try: |
|
84 | 84 | path = SearchPath(PATH, cmd, ext)[0] |
|
85 | 85 | except: |
|
86 | 86 | pass |
|
87 | 87 | if path is None: |
|
88 | 88 | raise OSError("command %r not found" % cmd) |
|
89 | 89 | else: |
|
90 | 90 | return path |
|
91 | 91 | |
|
92 | 92 | |
|
93 | 93 | def _system_body(p): |
|
94 | 94 | """Callback for _system.""" |
|
95 | 95 | enc = DEFAULT_ENCODING |
|
96 | 96 | for line in read_no_interrupt(p.stdout).splitlines(): |
|
97 | 97 | line = line.decode(enc, 'replace') |
|
98 | 98 | print(line, file=sys.stdout) |
|
99 | 99 | for line in read_no_interrupt(p.stderr).splitlines(): |
|
100 | 100 | line = line.decode(enc, 'replace') |
|
101 | 101 | print(line, file=sys.stderr) |
|
102 | 102 | |
|
103 | 103 | # Wait to finish for returncode |
|
104 | 104 | return p.wait() |
|
105 | 105 | |
|
106 | 106 | |
|
107 | 107 | def system(cmd): |
|
108 | 108 | """Win32 version of os.system() that works with network shares. |
|
109 | 109 | |
|
110 | 110 | Note that this implementation returns None, as meant for use in IPython. |
|
111 | 111 | |
|
112 | 112 | Parameters |
|
113 | 113 | ---------- |
|
114 | 114 | cmd : str or list |
|
115 | 115 | A command to be executed in the system shell. |
|
116 | 116 | |
|
117 | 117 | Returns |
|
118 | 118 | ------- |
|
119 | 119 | None : we explicitly do NOT return the subprocess status code, as this |
|
120 | 120 | utility is meant to be used extensively in IPython, where any return value |
|
121 | 121 | would trigger :func:`sys.displayhook` calls. |
|
122 | 122 | """ |
|
123 | 123 | # The controller provides interactivity with both |
|
124 | 124 | # stdin and stdout |
|
125 | 125 | #import _process_win32_controller |
|
126 | 126 | #_process_win32_controller.system(cmd) |
|
127 | 127 | |
|
128 | 128 | with AvoidUNCPath() as path: |
|
129 | 129 | if path is not None: |
|
130 | 130 | cmd = '"pushd %s &&"%s' % (path, cmd) |
|
131 | 131 | return process_handler(cmd, _system_body) |
|
132 | 132 | |
|
133 | 133 | def getoutput(cmd): |
|
134 | 134 | """Return standard output of executing cmd in a shell. |
|
135 | 135 | |
|
136 | 136 | Accepts the same arguments as os.system(). |
|
137 | 137 | |
|
138 | 138 | Parameters |
|
139 | 139 | ---------- |
|
140 | 140 | cmd : str or list |
|
141 | 141 | A command to be executed in the system shell. |
|
142 | 142 | |
|
143 | 143 | Returns |
|
144 | 144 | ------- |
|
145 | 145 | stdout : str |
|
146 | 146 | """ |
|
147 | 147 | |
|
148 | 148 | with AvoidUNCPath() as path: |
|
149 | 149 | if path is not None: |
|
150 | 150 | cmd = '"pushd %s &&"%s' % (path, cmd) |
|
151 | 151 | out = process_handler(cmd, lambda p: p.communicate()[0], STDOUT) |
|
152 | 152 | |
|
153 | 153 | if out is None: |
|
154 | 154 | out = b'' |
|
155 | return py3compat. |

155 | return py3compat.decode(out) | |
|
156 | 156 | |
|
157 | 157 | try: |
|
158 | 158 | CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW |
|
159 | 159 | CommandLineToArgvW.arg_types = [LPCWSTR, POINTER(c_int)] |
|
160 | 160 | CommandLineToArgvW.restype = POINTER(LPCWSTR) |
|
161 | 161 | LocalFree = ctypes.windll.kernel32.LocalFree |
|
162 | 162 | LocalFree.res_type = HLOCAL |
|
163 | 163 | LocalFree.arg_types = [HLOCAL] |
|
164 | 164 | |
|
165 | 165 | def arg_split(commandline, posix=False, strict=True): |
|
166 | 166 | """Split a command line's arguments in a shell-like manner. |
|
167 | 167 | |
|
168 | 168 | This is a special version for windows that use a ctypes call to CommandLineToArgvW |
|
169 | 169 | to do the argv splitting. The posix paramter is ignored. |
|
170 | 170 | |
|
171 | 171 | If strict=False, process_common.arg_split(...strict=False) is used instead. |
|
172 | 172 | """ |
|
173 | 173 | #CommandLineToArgvW returns path to executable if called with empty string. |
|
174 | 174 | if commandline.strip() == "": |
|
175 | 175 | return [] |
|
176 | 176 | if not strict: |
|
177 | 177 | # not really a cl-arg, fallback on _process_common |
|
178 | 178 | return py_arg_split(commandline, posix=posix, strict=strict) |
|
179 | 179 | argvn = c_int() |
|
180 | 180 | result_pointer = CommandLineToArgvW(py3compat.cast_unicode(commandline.lstrip()), ctypes.byref(argvn)) |
|
181 | 181 | result_array_type = LPCWSTR * argvn.value |
|
182 | 182 | result = [arg for arg in result_array_type.from_address(ctypes.addressof(result_pointer.contents))] |
|
183 | 183 | retval = LocalFree(result_pointer) |
|
184 | 184 | return result |
|
185 | 185 | except AttributeError: |
|
186 | 186 | arg_split = py_arg_split |
|
187 | 187 | |
|
188 | 188 | def check_pid(pid): |
|
189 | 189 | # OpenProcess returns 0 if no such process (of ours) exists |
|
190 | 190 | # positive int otherwise |
|
191 | 191 | return bool(ctypes.windll.kernel32.OpenProcess(1,0,pid)) |
@@ -1,576 +1,573 b'' | |||
|
1 | 1 | """Windows-specific implementation of process utilities with direct WinAPI. |
|
2 | 2 | |
|
3 | 3 | This file is meant to be used by process.py |
|
4 | 4 | """ |
|
5 | 5 | |
|
6 | 6 | #----------------------------------------------------------------------------- |
|
7 | 7 | # Copyright (C) 2010-2011 The IPython Development Team |
|
8 | 8 | # |
|
9 | 9 | # Distributed under the terms of the BSD License. The full license is in |
|
10 | 10 | # the file COPYING, distributed as part of this software. |
|
11 | 11 | #----------------------------------------------------------------------------- |
|
12 | 12 | |
|
13 | 13 | |
|
14 | 14 | # stdlib |
|
15 | 15 | import os, sys, threading |
|
16 | 16 | import ctypes, msvcrt |
|
17 | 17 | |
|
18 | # local imports | |
|
19 | from . import py3compat | |
|
20 | ||
|
21 | 18 | # Win32 API types needed for the API calls |
|
22 | 19 | from ctypes import POINTER |
|
23 | 20 | from ctypes.wintypes import HANDLE, HLOCAL, LPVOID, WORD, DWORD, BOOL, \ |
|
24 | 21 | ULONG, LPCWSTR |
|
25 | 22 | LPDWORD = POINTER(DWORD) |
|
26 | 23 | LPHANDLE = POINTER(HANDLE) |
|
27 | 24 | ULONG_PTR = POINTER(ULONG) |
|
28 | 25 | class SECURITY_ATTRIBUTES(ctypes.Structure): |
|
29 | 26 | _fields_ = [("nLength", DWORD), |
|
30 | 27 | ("lpSecurityDescriptor", LPVOID), |
|
31 | 28 | ("bInheritHandle", BOOL)] |
|
32 | 29 | LPSECURITY_ATTRIBUTES = POINTER(SECURITY_ATTRIBUTES) |
|
33 | 30 | class STARTUPINFO(ctypes.Structure): |
|
34 | 31 | _fields_ = [("cb", DWORD), |
|
35 | 32 | ("lpReserved", LPCWSTR), |
|
36 | 33 | ("lpDesktop", LPCWSTR), |
|
37 | 34 | ("lpTitle", LPCWSTR), |
|
38 | 35 | ("dwX", DWORD), |
|
39 | 36 | ("dwY", DWORD), |
|
40 | 37 | ("dwXSize", DWORD), |
|
41 | 38 | ("dwYSize", DWORD), |
|
42 | 39 | ("dwXCountChars", DWORD), |
|
43 | 40 | ("dwYCountChars", DWORD), |
|
44 | 41 | ("dwFillAttribute", DWORD), |
|
45 | 42 | ("dwFlags", DWORD), |
|
46 | 43 | ("wShowWindow", WORD), |
|
47 | 44 | ("cbReserved2", WORD), |
|
48 | 45 | ("lpReserved2", LPVOID), |
|
49 | 46 | ("hStdInput", HANDLE), |
|
50 | 47 | ("hStdOutput", HANDLE), |
|
51 | 48 | ("hStdError", HANDLE)] |
|
52 | 49 | LPSTARTUPINFO = POINTER(STARTUPINFO) |
|
53 | 50 | class PROCESS_INFORMATION(ctypes.Structure): |
|
54 | 51 | _fields_ = [("hProcess", HANDLE), |
|
55 | 52 | ("hThread", HANDLE), |
|
56 | 53 | ("dwProcessId", DWORD), |
|
57 | 54 | ("dwThreadId", DWORD)] |
|
58 | 55 | LPPROCESS_INFORMATION = POINTER(PROCESS_INFORMATION) |
|
59 | 56 | |
|
60 | 57 | # Win32 API constants needed |
|
61 | 58 | ERROR_HANDLE_EOF = 38 |
|
62 | 59 | ERROR_BROKEN_PIPE = 109 |
|
63 | 60 | ERROR_NO_DATA = 232 |
|
64 | 61 | HANDLE_FLAG_INHERIT = 0x0001 |
|
65 | 62 | STARTF_USESTDHANDLES = 0x0100 |
|
66 | 63 | CREATE_SUSPENDED = 0x0004 |
|
67 | 64 | CREATE_NEW_CONSOLE = 0x0010 |
|
68 | 65 | CREATE_NO_WINDOW = 0x08000000 |
|
69 | 66 | STILL_ACTIVE = 259 |
|
70 | 67 | WAIT_TIMEOUT = 0x0102 |
|
71 | 68 | WAIT_FAILED = 0xFFFFFFFF |
|
72 | 69 | INFINITE = 0xFFFFFFFF |
|
73 | 70 | DUPLICATE_SAME_ACCESS = 0x00000002 |
|
74 | 71 | ENABLE_ECHO_INPUT = 0x0004 |
|
75 | 72 | ENABLE_LINE_INPUT = 0x0002 |
|
76 | 73 | ENABLE_PROCESSED_INPUT = 0x0001 |
|
77 | 74 | |
|
78 | 75 | # Win32 API functions needed |
|
79 | 76 | GetLastError = ctypes.windll.kernel32.GetLastError |
|
80 | 77 | GetLastError.argtypes = [] |
|
81 | 78 | GetLastError.restype = DWORD |
|
82 | 79 | |
|
83 | 80 | CreateFile = ctypes.windll.kernel32.CreateFileW |
|
84 | 81 | CreateFile.argtypes = [LPCWSTR, DWORD, DWORD, LPVOID, DWORD, DWORD, HANDLE] |
|
85 | 82 | CreateFile.restype = HANDLE |
|
86 | 83 | |
|
87 | 84 | CreatePipe = ctypes.windll.kernel32.CreatePipe |
|
88 | 85 | CreatePipe.argtypes = [POINTER(HANDLE), POINTER(HANDLE), |
|
89 | 86 | LPSECURITY_ATTRIBUTES, DWORD] |
|
90 | 87 | CreatePipe.restype = BOOL |
|
91 | 88 | |
|
92 | 89 | CreateProcess = ctypes.windll.kernel32.CreateProcessW |
|
93 | 90 | CreateProcess.argtypes = [LPCWSTR, LPCWSTR, LPSECURITY_ATTRIBUTES, |
|
94 | 91 | LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPCWSTR, LPSTARTUPINFO, |
|
95 | 92 | LPPROCESS_INFORMATION] |
|
96 | 93 | CreateProcess.restype = BOOL |
|
97 | 94 | |
|
98 | 95 | GetExitCodeProcess = ctypes.windll.kernel32.GetExitCodeProcess |
|
99 | 96 | GetExitCodeProcess.argtypes = [HANDLE, LPDWORD] |
|
100 | 97 | GetExitCodeProcess.restype = BOOL |
|
101 | 98 | |
|
102 | 99 | GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess |
|
103 | 100 | GetCurrentProcess.argtypes = [] |
|
104 | 101 | GetCurrentProcess.restype = HANDLE |
|
105 | 102 | |
|
106 | 103 | ResumeThread = ctypes.windll.kernel32.ResumeThread |
|
107 | 104 | ResumeThread.argtypes = [HANDLE] |
|
108 | 105 | ResumeThread.restype = DWORD |
|
109 | 106 | |
|
110 | 107 | ReadFile = ctypes.windll.kernel32.ReadFile |
|
111 | 108 | ReadFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID] |
|
112 | 109 | ReadFile.restype = BOOL |
|
113 | 110 | |
|
114 | 111 | WriteFile = ctypes.windll.kernel32.WriteFile |
|
115 | 112 | WriteFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID] |
|
116 | 113 | WriteFile.restype = BOOL |
|
117 | 114 | |
|
118 | 115 | GetConsoleMode = ctypes.windll.kernel32.GetConsoleMode |
|
119 | 116 | GetConsoleMode.argtypes = [HANDLE, LPDWORD] |
|
120 | 117 | GetConsoleMode.restype = BOOL |
|
121 | 118 | |
|
122 | 119 | SetConsoleMode = ctypes.windll.kernel32.SetConsoleMode |
|
123 | 120 | SetConsoleMode.argtypes = [HANDLE, DWORD] |
|
124 | 121 | SetConsoleMode.restype = BOOL |
|
125 | 122 | |
|
126 | 123 | FlushConsoleInputBuffer = ctypes.windll.kernel32.FlushConsoleInputBuffer |
|
127 | 124 | FlushConsoleInputBuffer.argtypes = [HANDLE] |
|
128 | 125 | FlushConsoleInputBuffer.restype = BOOL |
|
129 | 126 | |
|
130 | 127 | WaitForSingleObject = ctypes.windll.kernel32.WaitForSingleObject |
|
131 | 128 | WaitForSingleObject.argtypes = [HANDLE, DWORD] |
|
132 | 129 | WaitForSingleObject.restype = DWORD |
|
133 | 130 | |
|
134 | 131 | DuplicateHandle = ctypes.windll.kernel32.DuplicateHandle |
|
135 | 132 | DuplicateHandle.argtypes = [HANDLE, HANDLE, HANDLE, LPHANDLE, |
|
136 | 133 | DWORD, BOOL, DWORD] |
|
137 | 134 | DuplicateHandle.restype = BOOL |
|
138 | 135 | |
|
139 | 136 | SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation |
|
140 | 137 | SetHandleInformation.argtypes = [HANDLE, DWORD, DWORD] |
|
141 | 138 | SetHandleInformation.restype = BOOL |
|
142 | 139 | |
|
143 | 140 | CloseHandle = ctypes.windll.kernel32.CloseHandle |
|
144 | 141 | CloseHandle.argtypes = [HANDLE] |
|
145 | 142 | CloseHandle.restype = BOOL |
|
146 | 143 | |
|
147 | 144 | CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW |
|
148 | 145 | CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(ctypes.c_int)] |
|
149 | 146 | CommandLineToArgvW.restype = POINTER(LPCWSTR) |
|
150 | 147 | |
|
151 | 148 | LocalFree = ctypes.windll.kernel32.LocalFree |
|
152 | 149 | LocalFree.argtypes = [HLOCAL] |
|
153 | 150 | LocalFree.restype = HLOCAL |
|
154 | 151 | |
|
155 | 152 | class AvoidUNCPath(object): |
|
156 | 153 | """A context manager to protect command execution from UNC paths. |
|
157 | 154 | |
|
158 | 155 | In the Win32 API, commands can't be invoked with the cwd being a UNC path. |
|
159 | 156 | This context manager temporarily changes directory to the 'C:' drive on |
|
160 | 157 | entering, and restores the original working directory on exit. |
|
161 | 158 | |
|
162 | 159 | The context manager returns the starting working directory *if* it made a |
|
163 | 160 | change and None otherwise, so that users can apply the necessary adjustment |
|
164 | 161 | to their system calls in the event of a change. |
|
165 | 162 | |
|
166 | 163 | Examples |
|
167 | 164 | -------- |
|
168 | 165 | :: |
|
169 | 166 | cmd = 'dir' |
|
170 | 167 | with AvoidUNCPath() as path: |
|
171 | 168 | if path is not None: |
|
172 | 169 | cmd = '"pushd %s &&"%s' % (path, cmd) |
|
173 | 170 | os.system(cmd) |
|
174 | 171 | """ |
|
175 | 172 | def __enter__(self): |
|
176 | 173 | self.path = os.getcwd() |
|
177 | 174 | self.is_unc_path = self.path.startswith(r"\\") |
|
178 | 175 | if self.is_unc_path: |
|
179 | 176 | # change to c drive (as cmd.exe cannot handle UNC addresses) |
|
180 | 177 | os.chdir("C:") |
|
181 | 178 | return self.path |
|
182 | 179 | else: |
|
183 | 180 | # We return None to signal that there was no change in the working |
|
184 | 181 | # directory |
|
185 | 182 | return None |
|
186 | 183 | |
|
187 | 184 | def __exit__(self, exc_type, exc_value, traceback): |
|
188 | 185 | if self.is_unc_path: |
|
189 | 186 | os.chdir(self.path) |
|
190 | 187 | |
|
191 | 188 | |
|
192 | 189 | class Win32ShellCommandController(object): |
|
193 | 190 | """Runs a shell command in a 'with' context. |
|
194 | 191 | |
|
195 | 192 | This implementation is Win32-specific. |
|
196 | 193 | |
|
197 | 194 | Example: |
|
198 | 195 | # Runs the command interactively with default console stdin/stdout |
|
199 | 196 | with Win32ShellCommandController('python -i') as scc: |
|
200 | 197 | scc.run() |
|
201 | 198 | |
|
202 | 199 | # Runs the command using the provided functions for stdin/stdout |
|
203 | 200 | def my_stdout_func(s): |
|
204 | 201 | # print or save the string 's' |
|
205 | 202 | write_to_stdout(s) |
|
206 | 203 | def my_stdin_func(): |
|
207 | 204 | # If input is available, return it as a string. |
|
208 | 205 | if input_available(): |
|
209 | 206 | return get_input() |
|
210 | 207 | # If no input available, return None after a short delay to |
|
211 | 208 | # keep from blocking. |
|
212 | 209 | else: |
|
213 | 210 | time.sleep(0.01) |
|
214 | 211 | return None |
|
215 | 212 | |
|
216 | 213 | with Win32ShellCommandController('python -i') as scc: |
|
217 | 214 | scc.run(my_stdout_func, my_stdin_func) |
|
218 | 215 | """ |
|
219 | 216 | |
|
220 | 217 | def __init__(self, cmd, mergeout = True): |
|
221 | 218 | """Initializes the shell command controller. |
|
222 | 219 | |
|
223 | 220 | The cmd is the program to execute, and mergeout is |
|
224 | 221 | whether to blend stdout and stderr into one output |
|
225 | 222 | in stdout. Merging them together in this fashion more |
|
226 | 223 | reliably keeps stdout and stderr in the correct order, |
|
227 | 224 | especially for interactive shell usage. |
|
228 | 225 | """ |
|
229 | 226 | self.cmd = cmd |
|
230 | 227 | self.mergeout = mergeout |
|
231 | 228 | |
|
232 | 229 | def __enter__(self): |
|
233 | 230 | cmd = self.cmd |
|
234 | 231 | mergeout = self.mergeout |
|
235 | 232 | |
|
236 | 233 | self.hstdout, self.hstdin, self.hstderr = None, None, None |
|
237 | 234 | self.piProcInfo = None |
|
238 | 235 | try: |
|
239 | 236 | p_hstdout, c_hstdout, p_hstderr, \ |
|
240 | 237 | c_hstderr, p_hstdin, c_hstdin = [None]*6 |
|
241 | 238 | |
|
242 | 239 | # SECURITY_ATTRIBUTES with inherit handle set to True |
|
243 | 240 | saAttr = SECURITY_ATTRIBUTES() |
|
244 | 241 | saAttr.nLength = ctypes.sizeof(saAttr) |
|
245 | 242 | saAttr.bInheritHandle = True |
|
246 | 243 | saAttr.lpSecurityDescriptor = None |
|
247 | 244 | |
|
248 | 245 | def create_pipe(uninherit): |
|
249 | 246 | """Creates a Windows pipe, which consists of two handles. |
|
250 | 247 | |
|
251 | 248 | The 'uninherit' parameter controls which handle is not |
|
252 | 249 | inherited by the child process. |
|
253 | 250 | """ |
|
254 | 251 | handles = HANDLE(), HANDLE() |
|
255 | 252 | if not CreatePipe(ctypes.byref(handles[0]), |
|
256 | 253 | ctypes.byref(handles[1]), ctypes.byref(saAttr), 0): |
|
257 | 254 | raise ctypes.WinError() |
|
258 | 255 | if not SetHandleInformation(handles[uninherit], |
|
259 | 256 | HANDLE_FLAG_INHERIT, 0): |
|
260 | 257 | raise ctypes.WinError() |
|
261 | 258 | return handles[0].value, handles[1].value |
|
262 | 259 | |
|
263 | 260 | p_hstdout, c_hstdout = create_pipe(uninherit=0) |
|
264 | 261 | # 'mergeout' signals that stdout and stderr should be merged. |
|
265 | 262 | # We do that by using one pipe for both of them. |
|
266 | 263 | if mergeout: |
|
267 | 264 | c_hstderr = HANDLE() |
|
268 | 265 | if not DuplicateHandle(GetCurrentProcess(), c_hstdout, |
|
269 | 266 | GetCurrentProcess(), ctypes.byref(c_hstderr), |
|
270 | 267 | 0, True, DUPLICATE_SAME_ACCESS): |
|
271 | 268 | raise ctypes.WinError() |
|
272 | 269 | else: |
|
273 | 270 | p_hstderr, c_hstderr = create_pipe(uninherit=0) |
|
274 | 271 | c_hstdin, p_hstdin = create_pipe(uninherit=1) |
|
275 | 272 | |
|
276 | 273 | # Create the process object |
|
277 | 274 | piProcInfo = PROCESS_INFORMATION() |
|
278 | 275 | siStartInfo = STARTUPINFO() |
|
279 | 276 | siStartInfo.cb = ctypes.sizeof(siStartInfo) |
|
280 | 277 | siStartInfo.hStdInput = c_hstdin |
|
281 | 278 | siStartInfo.hStdOutput = c_hstdout |
|
282 | 279 | siStartInfo.hStdError = c_hstderr |
|
283 | 280 | siStartInfo.dwFlags = STARTF_USESTDHANDLES |
|
284 | 281 | dwCreationFlags = CREATE_SUSPENDED | CREATE_NO_WINDOW # | CREATE_NEW_CONSOLE |
|
285 | 282 | |
|
286 | 283 | if not CreateProcess(None, |
|
287 | 284 | u"cmd.exe /c " + cmd, |
|
288 | 285 | None, None, True, dwCreationFlags, |
|
289 | 286 | None, None, ctypes.byref(siStartInfo), |
|
290 | 287 | ctypes.byref(piProcInfo)): |
|
291 | 288 | raise ctypes.WinError() |
|
292 | 289 | |
|
293 | 290 | # Close this process's versions of the child handles |
|
294 | 291 | CloseHandle(c_hstdin) |
|
295 | 292 | c_hstdin = None |
|
296 | 293 | CloseHandle(c_hstdout) |
|
297 | 294 | c_hstdout = None |
|
298 | 295 | if c_hstderr is not None: |
|
299 | 296 | CloseHandle(c_hstderr) |
|
300 | 297 | c_hstderr = None |
|
301 | 298 | |
|
302 | 299 | # Transfer ownership of the parent handles to the object |
|
303 | 300 | self.hstdin = p_hstdin |
|
304 | 301 | p_hstdin = None |
|
305 | 302 | self.hstdout = p_hstdout |
|
306 | 303 | p_hstdout = None |
|
307 | 304 | if not mergeout: |
|
308 | 305 | self.hstderr = p_hstderr |
|
309 | 306 | p_hstderr = None |
|
310 | 307 | self.piProcInfo = piProcInfo |
|
311 | 308 | |
|
312 | 309 | finally: |
|
313 | 310 | if p_hstdin: |
|
314 | 311 | CloseHandle(p_hstdin) |
|
315 | 312 | if c_hstdin: |
|
316 | 313 | CloseHandle(c_hstdin) |
|
317 | 314 | if p_hstdout: |
|
318 | 315 | CloseHandle(p_hstdout) |
|
319 | 316 | if c_hstdout: |
|
320 | 317 | CloseHandle(c_hstdout) |
|
321 | 318 | if p_hstderr: |
|
322 | 319 | CloseHandle(p_hstderr) |
|
323 | 320 | if c_hstderr: |
|
324 | 321 | CloseHandle(c_hstderr) |
|
325 | 322 | |
|
326 | 323 | return self |
|
327 | 324 | |
|
328 | 325 | def _stdin_thread(self, handle, hprocess, func, stdout_func): |
|
329 | 326 | exitCode = DWORD() |
|
330 | 327 | bytesWritten = DWORD(0) |
|
331 | 328 | while True: |
|
332 | 329 | #print("stdin thread loop start") |
|
333 | 330 | # Get the input string (may be bytes or unicode) |
|
334 | 331 | data = func() |
|
335 | 332 | |
|
336 | 333 | # None signals to poll whether the process has exited |
|
337 | 334 | if data is None: |
|
338 | 335 | #print("checking for process completion") |
|
339 | 336 | if not GetExitCodeProcess(hprocess, ctypes.byref(exitCode)): |
|
340 | 337 | raise ctypes.WinError() |
|
341 | 338 | if exitCode.value != STILL_ACTIVE: |
|
342 | 339 | return |
|
343 | 340 | # TESTING: Does zero-sized writefile help? |
|
344 | 341 | if not WriteFile(handle, "", 0, |
|
345 | 342 | ctypes.byref(bytesWritten), None): |
|
346 | 343 | raise ctypes.WinError() |
|
347 | 344 | continue |
|
348 | 345 | #print("\nGot str %s\n" % repr(data), file=sys.stderr) |
|
349 | 346 | |
|
350 | 347 | # Encode the string to the console encoding |
|
351 | 348 | if isinstance(data, unicode): #FIXME: Python3 |
|
352 | 349 | data = data.encode('utf_8') |
|
353 | 350 | |
|
354 | 351 | # What we have now must be a string of bytes |
|
355 | 352 | if not isinstance(data, str): #FIXME: Python3 |
|
356 | 353 | raise RuntimeError("internal stdin function string error") |
|
357 | 354 | |
|
358 | 355 | # An empty string signals EOF |
|
359 | 356 | if len(data) == 0: |
|
360 | 357 | return |
|
361 | 358 | |
|
362 | 359 | # In a windows console, sometimes the input is echoed, |
|
363 | 360 | # but sometimes not. How do we determine when to do this? |
|
364 | 361 | stdout_func(data) |
|
365 | 362 | # WriteFile may not accept all the data at once. |
|
366 | 363 | # Loop until everything is processed |
|
367 | 364 | while len(data) != 0: |
|
368 | 365 | #print("Calling writefile") |
|
369 | 366 | if not WriteFile(handle, data, len(data), |
|
370 | 367 | ctypes.byref(bytesWritten), None): |
|
371 | 368 | # This occurs at exit |
|
372 | 369 | if GetLastError() == ERROR_NO_DATA: |
|
373 | 370 | return |
|
374 | 371 | raise ctypes.WinError() |
|
375 | 372 | #print("Called writefile") |
|
376 | 373 | data = data[bytesWritten.value:] |
|
377 | 374 | |
|
378 | 375 | def _stdout_thread(self, handle, func): |
|
379 | 376 | # Allocate the output buffer |
|
380 | 377 | data = ctypes.create_string_buffer(4096) |
|
381 | 378 | while True: |
|
382 | 379 | bytesRead = DWORD(0) |
|
383 | 380 | if not ReadFile(handle, data, 4096, |
|
384 | 381 | ctypes.byref(bytesRead), None): |
|
385 | 382 | le = GetLastError() |
|
386 | 383 | if le == ERROR_BROKEN_PIPE: |
|
387 | 384 | return |
|
388 | 385 | else: |
|
389 | 386 | raise ctypes.WinError() |
|
390 | 387 | # FIXME: Python3 |
|
391 | 388 | s = data.value[0:bytesRead.value] |
|
392 | 389 | #print("\nv: %s" % repr(s), file=sys.stderr) |
|
393 | 390 | func(s.decode('utf_8', 'replace')) |
|
394 | 391 | |
|
395 | 392 | def run(self, stdout_func = None, stdin_func = None, stderr_func = None): |
|
396 | 393 | """Runs the process, using the provided functions for I/O. |
|
397 | 394 | |
|
398 | 395 | The function stdin_func should return strings whenever a |
|
399 | 396 | character or characters become available. |
|
400 | 397 | The functions stdout_func and stderr_func are called whenever |
|
401 | 398 | something is printed to stdout or stderr, respectively. |
|
402 | 399 | These functions are called from different threads (but not |
|
403 | 400 | concurrently, because of the GIL). |
|
404 | 401 | """ |
|
405 | 402 | if stdout_func is None and stdin_func is None and stderr_func is None: |
|
406 | 403 | return self._run_stdio() |
|
407 | 404 | |
|
408 | 405 | if stderr_func is not None and self.mergeout: |
|
409 | 406 | raise RuntimeError("Shell command was initiated with " |
|
410 | 407 | "merged stdout/stderr, but a separate stderr_func " |
|
411 | 408 | "was provided to the run() method") |
|
412 | 409 | |
|
413 | 410 | # Create a thread for each input/output handle |
|
414 | 411 | stdin_thread = None |
|
415 | 412 | threads = [] |
|
416 | 413 | if stdin_func: |
|
417 | 414 | stdin_thread = threading.Thread(target=self._stdin_thread, |
|
418 | 415 | args=(self.hstdin, self.piProcInfo.hProcess, |
|
419 | 416 | stdin_func, stdout_func)) |
|
420 | 417 | threads.append(threading.Thread(target=self._stdout_thread, |
|
421 | 418 | args=(self.hstdout, stdout_func))) |
|
422 | 419 | if not self.mergeout: |
|
423 | 420 | if stderr_func is None: |
|
424 | 421 | stderr_func = stdout_func |
|
425 | 422 | threads.append(threading.Thread(target=self._stdout_thread, |
|
426 | 423 | args=(self.hstderr, stderr_func))) |
|
427 | 424 | # Start the I/O threads and the process |
|
428 | 425 | if ResumeThread(self.piProcInfo.hThread) == 0xFFFFFFFF: |
|
429 | 426 | raise ctypes.WinError() |
|
430 | 427 | if stdin_thread is not None: |
|
431 | 428 | stdin_thread.start() |
|
432 | 429 | for thread in threads: |
|
433 | 430 | thread.start() |
|
434 | 431 | # Wait for the process to complete |
|
435 | 432 | if WaitForSingleObject(self.piProcInfo.hProcess, INFINITE) == \ |
|
436 | 433 | WAIT_FAILED: |
|
437 | 434 | raise ctypes.WinError() |
|
438 | 435 | # Wait for the I/O threads to complete |
|
439 | 436 | for thread in threads: |
|
440 | 437 | thread.join() |
|
441 | 438 | |
|
442 | 439 | # Wait for the stdin thread to complete |
|
443 | 440 | if stdin_thread is not None: |
|
444 | 441 | stdin_thread.join() |
|
445 | 442 | |
|
446 | 443 | def _stdin_raw_nonblock(self): |
|
447 | 444 | """Use the raw Win32 handle of sys.stdin to do non-blocking reads""" |
|
448 | 445 | # WARNING: This is experimental, and produces inconsistent results. |
|
449 | 446 | # It's possible for the handle not to be appropriate for use |
|
450 | 447 | # with WaitForSingleObject, among other things. |
|
451 | 448 | handle = msvcrt.get_osfhandle(sys.stdin.fileno()) |
|
452 | 449 | result = WaitForSingleObject(handle, 100) |
|
453 | 450 | if result == WAIT_FAILED: |
|
454 | 451 | raise ctypes.WinError() |
|
455 | 452 | elif result == WAIT_TIMEOUT: |
|
456 | 453 | print(".", end='') |
|
457 | 454 | return None |
|
458 | 455 | else: |
|
459 | 456 | data = ctypes.create_string_buffer(256) |
|
460 | 457 | bytesRead = DWORD(0) |
|
461 | 458 | print('?', end='') |
|
462 | 459 | |
|
463 | 460 | if not ReadFile(handle, data, 256, |
|
464 | 461 | ctypes.byref(bytesRead), None): |
|
465 | 462 | raise ctypes.WinError() |
|
466 | 463 | # This ensures the non-blocking works with an actual console |
|
467 | 464 | # Not checking the error, so the processing will still work with |
|
468 | 465 | # other handle types |
|
469 | 466 | FlushConsoleInputBuffer(handle) |
|
470 | 467 | |
|
471 | 468 | data = data.value |
|
472 | 469 | data = data.replace('\r\n', '\n') |
|
473 | 470 | data = data.replace('\r', '\n') |
|
474 | 471 | print(repr(data) + " ", end='') |
|
475 | 472 | return data |
|
476 | 473 | |
|
477 | 474 | def _stdin_raw_block(self): |
|
478 | 475 | """Use a blocking stdin read""" |
|
479 | 476 | # The big problem with the blocking read is that it doesn't |
|
480 | 477 | # exit when it's supposed to in all contexts. An extra |
|
481 | 478 | # key-press may be required to trigger the exit. |
|
482 | 479 | try: |
|
483 | 480 | data = sys.stdin.read(1) |
|
484 | 481 | data = data.replace('\r', '\n') |
|
485 | 482 | return data |
|
486 | 483 | except WindowsError as we: |
|
487 | 484 | if we.winerror == ERROR_NO_DATA: |
|
488 | 485 | # This error occurs when the pipe is closed |
|
489 | 486 | return None |
|
490 | 487 | else: |
|
491 | 488 | # Otherwise let the error propagate |
|
492 | 489 | raise we |
|
493 | 490 | |
|
494 | 491 | def _stdout_raw(self, s): |
|
495 | 492 | """Writes the string to stdout""" |
|
496 | 493 | print(s, end='', file=sys.stdout) |
|
497 | 494 | sys.stdout.flush() |
|
498 | 495 | |
|
499 | 496 | def _stderr_raw(self, s): |
|
500 | 497 | """Writes the string to stderr""" |
|
501 | 498 | print(s, end='', file=sys.stderr) |
|
502 | 499 | sys.stderr.flush() |
|
503 | 500 | |
|
504 | 501 | def _run_stdio(self): |
|
505 | 502 | """Runs the process using the system standard I/O. |
|
506 | 503 | |
|
507 | 504 | IMPORTANT: stdin needs to be asynchronous, so the Python |
|
508 | 505 | sys.stdin object is not used. Instead, |
|
509 | 506 | msvcrt.kbhit/getwch are used asynchronously. |
|
510 | 507 | """ |
|
511 | 508 | # Disable Line and Echo mode |
|
512 | 509 | #lpMode = DWORD() |
|
513 | 510 | #handle = msvcrt.get_osfhandle(sys.stdin.fileno()) |
|
514 | 511 | #if GetConsoleMode(handle, ctypes.byref(lpMode)): |
|
515 | 512 | # set_console_mode = True |
|
516 | 513 | # if not SetConsoleMode(handle, lpMode.value & |
|
517 | 514 | # ~(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT)): |
|
518 | 515 | # raise ctypes.WinError() |
|
519 | 516 | |
|
520 | 517 | if self.mergeout: |
|
521 | 518 | return self.run(stdout_func = self._stdout_raw, |
|
522 | 519 | stdin_func = self._stdin_raw_block) |
|
523 | 520 | else: |
|
524 | 521 | return self.run(stdout_func = self._stdout_raw, |
|
525 | 522 | stdin_func = self._stdin_raw_block, |
|
526 | 523 | stderr_func = self._stderr_raw) |
|
527 | 524 | |
|
528 | 525 | # Restore the previous console mode |
|
529 | 526 | #if set_console_mode: |
|
530 | 527 | # if not SetConsoleMode(handle, lpMode.value): |
|
531 | 528 | # raise ctypes.WinError() |
|
532 | 529 | |
|
533 | 530 | def __exit__(self, exc_type, exc_value, traceback): |
|
534 | 531 | if self.hstdin: |
|
535 | 532 | CloseHandle(self.hstdin) |
|
536 | 533 | self.hstdin = None |
|
537 | 534 | if self.hstdout: |
|
538 | 535 | CloseHandle(self.hstdout) |
|
539 | 536 | self.hstdout = None |
|
540 | 537 | if self.hstderr: |
|
541 | 538 | CloseHandle(self.hstderr) |
|
542 | 539 | self.hstderr = None |
|
543 | 540 | if self.piProcInfo is not None: |
|
544 | 541 | CloseHandle(self.piProcInfo.hProcess) |
|
545 | 542 | CloseHandle(self.piProcInfo.hThread) |
|
546 | 543 | self.piProcInfo = None |
|
547 | 544 | |
|
548 | 545 | |
|
549 | 546 | def system(cmd): |
|
550 | 547 | """Win32 version of os.system() that works with network shares. |
|
551 | 548 | |
|
552 | 549 | Note that this implementation returns None, as meant for use in IPython. |
|
553 | 550 | |
|
554 | 551 | Parameters |
|
555 | 552 | ---------- |
|
556 | 553 | cmd : str |
|
557 | 554 | A command to be executed in the system shell. |
|
558 | 555 | |
|
559 | 556 | Returns |
|
560 | 557 | ------- |
|
561 | 558 | None : we explicitly do NOT return the subprocess status code, as this |
|
562 | 559 | utility is meant to be used extensively in IPython, where any return value |
|
563 | 560 | would trigger :func:`sys.displayhook` calls. |
|
564 | 561 | """ |
|
565 | 562 | with AvoidUNCPath() as path: |
|
566 | 563 | if path is not None: |
|
567 | 564 | cmd = '"pushd %s &&"%s' % (path, cmd) |
|
568 | 565 | with Win32ShellCommandController(cmd) as scc: |
|
569 | 566 | scc.run() |
|
570 | 567 | |
|
571 | 568 | |
|
572 | 569 | if __name__ == "__main__": |
|
573 | 570 | print("Test starting!") |
|
574 | 571 | #system("cmd") |
|
575 | 572 | system("python -i") |
|
576 | 573 | print("Test finished!") |
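
For reference, a minimal usage sketch of the controller above, collecting the child's output through a callback. This is a hedged example: it assumes Windows and that the module is importable as IPython.utils._process_win32_controller, and the command string is arbitrary.

    from IPython.utils._process_win32_controller import Win32ShellCommandController

    chunks = []

    def collect_stdout(s):
        # Called from the reader thread with decoded chunks of the child's output.
        chunks.append(s)

    # mergeout=True (the default) sends stderr down the same pipe as stdout,
    # so the single stdout callback sees everything the child prints.
    with Win32ShellCommandController('echo hello') as scc:
        scc.run(stdout_func=collect_stdout)

    print(''.join(chunks))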
@@ -1,69 +1,69 b'' | |||
|
1 | 1 | # encoding: utf-8 |
|
2 | 2 | """ |
|
3 | 3 | Utilities for working with external processes. |
|
4 | 4 | """ |
|
5 | 5 | |
|
6 | 6 | # Copyright (c) IPython Development Team. |
|
7 | 7 | # Distributed under the terms of the Modified BSD License. |
|
8 | 8 | |
|
9 | 9 | |
|
10 | 10 | import os |
|
11 | import shutil | |
|
11 | 12 | import sys |
|
12 | 13 | |
|
13 | 14 | if sys.platform == 'win32': |
|
14 | 15 | from ._process_win32 import system, getoutput, arg_split, check_pid |
|
15 | 16 | elif sys.platform == 'cli': |
|
16 | 17 | from ._process_cli import system, getoutput, arg_split, check_pid |
|
17 | 18 | else: |
|
18 | 19 | from ._process_posix import system, getoutput, arg_split, check_pid |
|
19 | 20 | |
|
20 | 21 | from ._process_common import getoutputerror, get_output_error_code, process_handler |
|
21 | from . import py3compat | |
|
22 | 22 | |
|
23 | 23 | |
|
24 | 24 | class FindCmdError(Exception): |
|
25 | 25 | pass |
|
26 | 26 | |
|
27 | 27 | |
|
28 | 28 | def find_cmd(cmd): |
|
29 | 29 | """Find absolute path to executable cmd in a cross platform manner. |
|
30 | 30 | |
|
31 | 31 | This function tries to determine the full path to a command line program |
|
32 | 32 | using :func:`shutil.which` on all platforms. Most of the |

33 | 33 | time it will use the version that is first on the user's `PATH`. |
|
34 | 34 | |
|
35 | 35 | Warning, don't use this to find IPython command line programs as there |
|
36 | 36 | is a risk you will find the wrong one. Instead find those using the |
|
37 | 37 | following code and looking for the application itself:: |
|
38 | 38 | |
|
39 | 39 | import sys |
|
40 | 40 | argv = [sys.executable, '-m', 'IPython'] |
|
41 | 41 | |
|
42 | 42 | Parameters |
|
43 | 43 | ---------- |
|
44 | 44 | cmd : str |
|
45 | 45 | The command line program to look for. |
|
46 | 46 | """ |
|
47 | path = py3compat.which(cmd) | |
|
47 | path = shutil.which(cmd) | |
|
48 | 48 | if path is None: |
|
49 | 49 | raise FindCmdError('command could not be found: %s' % cmd) |
|
50 | 50 | return path |
|
51 | 51 | |
|
52 | 52 | |
|
53 | 53 | def abbrev_cwd(): |
|
54 | 54 | """ Return abbreviated version of cwd, e.g. d:mydir """ |
|
55 | 55 | cwd = os.getcwd().replace('\\','/') |
|
56 | 56 | drivepart = '' |
|
57 | 57 | tail = cwd |
|
58 | 58 | if sys.platform == 'win32': |
|
59 | 59 | if len(cwd) < 4: |
|
60 | 60 | return cwd |
|
61 | 61 | drivepart,tail = os.path.splitdrive(cwd) |
|
62 | 62 | |
|
63 | 63 | |
|
64 | 64 | parts = tail.split('/') |
|
65 | 65 | if len(parts) > 2: |
|
66 | 66 | tail = '/'.join(parts[-2:]) |
|
67 | 67 | |
|
68 | 68 | return (drivepart + ( |
|
69 | 69 | cwd == '/' and '/' or tail)) |
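
A short usage sketch for the two helpers above (hedged: it assumes the module is importable as IPython.utils.process, and the printed values are only illustrative):

    from IPython.utils.process import find_cmd, FindCmdError, abbrev_cwd

    try:
        # Absolute path of the first matching executable on PATH,
        # e.g. '/usr/bin/git' or a full .exe path on Windows.
        print(find_cmd('git'))
    except FindCmdError:
        print('git is not on PATH')

    # Abbreviated working directory, e.g. 'src/IPython' (or 'C:src/IPython' on Windows).
    print(abbrev_cwd())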
@@ -1,336 +1,255 b'' | |||
|
1 | 1 | # coding: utf-8 |
|
2 | 2 | """Compatibility tricks for Python 3. Mainly to do with unicode.""" |
|
3 | 3 | import functools |
|
4 | 4 | import os |
|
5 | 5 | import sys |
|
6 | 6 | import re |
|
7 | 7 | import shutil |
|
8 | 8 | import types |
|
9 | 9 | import platform |
|
10 | 10 | |
|
11 | 11 | from .encoding import DEFAULT_ENCODING |
|
12 | 12 | |
|
13 | 13 | def no_code(x, encoding=None): |
|
14 | 14 | return x |
|
15 | 15 | |
|
16 | 16 | def decode(s, encoding=None): |
|
17 | 17 | encoding = encoding or DEFAULT_ENCODING |
|
18 | 18 | return s.decode(encoding, "replace") |
|
19 | 19 | |
|
20 | 20 | def encode(u, encoding=None): |
|
21 | 21 | encoding = encoding or DEFAULT_ENCODING |
|
22 | 22 | return u.encode(encoding, "replace") |
|
23 | 23 | |
|
24 | 24 | |
|
25 | 25 | def cast_unicode(s, encoding=None): |
|
26 | 26 | if isinstance(s, bytes): |
|
27 | 27 | return decode(s, encoding) |
|
28 | 28 | return s |
|
29 | 29 | |
|
30 | 30 | def cast_bytes(s, encoding=None): |
|
31 | 31 | if not isinstance(s, bytes): |
|
32 | 32 | return encode(s, encoding) |
|
33 | 33 | return s |
|
34 | 34 | |
|
35 | 35 | def buffer_to_bytes(buf): |
|
36 | 36 | """Cast a buffer object to bytes""" |
|
37 | 37 | if not isinstance(buf, bytes): |
|
38 | 38 | buf = bytes(buf) |
|
39 | 39 | return buf |
|
40 | 40 | |
|
41 | 41 | def _modify_str_or_docstring(str_change_func): |
|
42 | 42 | @functools.wraps(str_change_func) |
|
43 | 43 | def wrapper(func_or_str): |
|
44 | 44 | if isinstance(func_or_str, string_types): |
|
45 | 45 | func = None |
|
46 | 46 | doc = func_or_str |
|
47 | 47 | else: |
|
48 | 48 | func = func_or_str |
|
49 | 49 | doc = func.__doc__ |
|
50 | 50 | |
|
51 | 51 | # PYTHONOPTIMIZE=2 strips docstrings, so they can disappear unexpectedly |
|
52 | 52 | if doc is not None: |
|
53 | 53 | doc = str_change_func(doc) |
|
54 | 54 | |
|
55 | 55 | if func: |
|
56 | 56 | func.__doc__ = doc |
|
57 | 57 | return func |
|
58 | 58 | return doc |
|
59 | 59 | return wrapper |
|
60 | 60 | |
|
61 | 61 | def safe_unicode(e): |
|
62 | 62 | """unicode(e) with various fallbacks. Used for exceptions, which may not be |
|
63 | 63 | safe to call unicode() on. |
|
64 | 64 | """ |
|
65 | 65 | try: |
|
66 | 66 | return unicode_type(e) |
|
67 | 67 | except UnicodeError: |
|
68 | 68 | pass |
|
69 | 69 | |
|
70 | 70 | try: |
|
71 | 71 | return str_to_unicode(str(e)) |
|
72 | 72 | except UnicodeError: |
|
73 | 73 | pass |
|
74 | 74 | |
|
75 | 75 | try: |
|
76 | 76 | return str_to_unicode(repr(e)) |
|
77 | 77 | except UnicodeError: |
|
78 | 78 | pass |
|
79 | 79 | |
|
80 | 80 | return u'Unrecoverably corrupt evalue' |
|
81 | 81 | |
|
82 | 82 | # shutil.which from Python 3.4 |
|
83 | 83 | def _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None): |
|
84 | 84 | """Given a command, mode, and a PATH string, return the path which |
|
85 | 85 | conforms to the given mode on the PATH, or None if there is no such |
|
86 | 86 | file. |
|
87 | 87 | |
|
88 | 88 | `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result |
|
89 | 89 | of os.environ.get("PATH"), or can be overridden with a custom search |
|
90 | 90 | path. |
|
91 | 91 | |
|
92 | 92 | This is a backport of shutil.which from Python 3.4 |
|
93 | 93 | """ |
|
94 | 94 | # Check that a given file can be accessed with the correct mode. |
|
95 | 95 | # Additionally check that `file` is not a directory, as on Windows |
|
96 | 96 | # directories pass the os.access check. |
|
97 | 97 | def _access_check(fn, mode): |
|
98 | 98 | return (os.path.exists(fn) and os.access(fn, mode) |
|
99 | 99 | and not os.path.isdir(fn)) |
|
100 | 100 | |
|
101 | 101 | # If we're given a path with a directory part, look it up directly rather |
|
102 | 102 | # than referring to PATH directories. This includes checking relative to the |
|
103 | 103 | # current directory, e.g. ./script |
|
104 | 104 | if os.path.dirname(cmd): |
|
105 | 105 | if _access_check(cmd, mode): |
|
106 | 106 | return cmd |
|
107 | 107 | return None |
|
108 | 108 | |
|
109 | 109 | if path is None: |
|
110 | 110 | path = os.environ.get("PATH", os.defpath) |
|
111 | 111 | if not path: |
|
112 | 112 | return None |
|
113 | 113 | path = path.split(os.pathsep) |
|
114 | 114 | |
|
115 | 115 | if sys.platform == "win32": |
|
116 | 116 | # The current directory takes precedence on Windows. |
|
117 | 117 | if not os.curdir in path: |
|
118 | 118 | path.insert(0, os.curdir) |
|
119 | 119 | |
|
120 | 120 | # PATHEXT is necessary to check on Windows. |
|
121 | 121 | pathext = os.environ.get("PATHEXT", "").split(os.pathsep) |
|
122 | 122 | # See if the given file matches any of the expected path extensions. |
|
123 | 123 | # This will allow us to short circuit when given "python.exe". |
|
124 | 124 | # If it does match, only test that one, otherwise we have to try |
|
125 | 125 | # others. |
|
126 | 126 | if any(cmd.lower().endswith(ext.lower()) for ext in pathext): |
|
127 | 127 | files = [cmd] |
|
128 | 128 | else: |
|
129 | 129 | files = [cmd + ext for ext in pathext] |
|
130 | 130 | else: |
|
131 | 131 | # On other platforms you don't have things like PATHEXT to tell you |
|
132 | 132 | # what file suffixes are executable, so just pass on cmd as-is. |
|
133 | 133 | files = [cmd] |
|
134 | 134 | |
|
135 | 135 | seen = set() |
|
136 | 136 | for dir in path: |
|
137 | 137 | normdir = os.path.normcase(dir) |
|
138 | 138 | if not normdir in seen: |
|
139 | 139 | seen.add(normdir) |
|
140 | 140 | for thefile in files: |
|
141 | 141 | name = os.path.join(dir, thefile) |
|
142 | 142 | if _access_check(name, mode): |
|
143 | 143 | return name |
|
144 | 144 | return None |
|
145 | 145 | |
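
As a quick, hedged illustration of the search rules described above (results depend on the local environment): a bare command name is searched along PATH, honouring PATHEXT on Windows, while a name containing a directory part is checked directly.

    import os
    import sys

    # Bare name: looked up along PATH (with PATHEXT suffixes tried on Windows).
    print(_shutil_which(os.path.basename(sys.executable)))

    # Name with a directory part: checked directly, PATH is not consulted.
    print(_shutil_which(sys.executable))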
|
146 | if sys.version_info[0] >= 3: | |
|
147 | PY3 = True | |
|
148 | ||
|
149 | # keep reference to builtin_mod because the kernel overrides that value | |
|
150 | # to forward requests to a frontend. | |
|
151 | def input(prompt=''): | |
|
152 | return builtin_mod.input(prompt) | |
|
153 | ||
|
154 | builtin_mod_name = "builtins" | |
|
155 | import builtins as builtin_mod | |
|
156 | ||
|
157 | str_to_unicode = no_code | |
|
158 | unicode_to_str = no_code | |
|
159 | str_to_bytes = encode | |
|
160 | bytes_to_str = decode | |
|
161 | cast_bytes_py2 = no_code | |
|
162 | cast_unicode_py2 = no_code | |
|
163 | buffer_to_bytes_py2 = no_code | |
|
164 | ||
|
165 | string_types = (str,) | |
|
166 | unicode_type = str | |
|
167 | ||
|
168 | which = shutil.which | |
|
169 | ||
|
170 | def isidentifier(s, dotted=False): | |
|
171 | if dotted: | |
|
172 | return all(isidentifier(a) for a in s.split(".")) | |
|
173 | return s.isidentifier() | |
|
174 | ||
|
175 | xrange = range | |
|
176 | def iteritems(d): return iter(d.items()) | |
|
177 | def itervalues(d): return iter(d.values()) | |
|
178 | getcwd = os.getcwd | |
|
179 | ||
|
180 | MethodType = types.MethodType | |
|
181 | ||
|
182 | def execfile(fname, glob, loc=None, compiler=None): | |
|
183 | loc = loc if (loc is not None) else glob | |
|
184 | with open(fname, 'rb') as f: | |
|
185 | compiler = compiler or compile | |
|
186 | exec(compiler(f.read(), fname, 'exec'), glob, loc) | |
|
187 | ||
|
188 | # Refactor print statements in doctests. | |
|
189 | _print_statement_re = re.compile(r"\bprint (?P<expr>.*)$", re.MULTILINE) | |
|
190 | def _print_statement_sub(match): | |
|
191 | expr = match.groups('expr') | |
|
192 | return "print(%s)" % expr | |
|
193 | ||
|
194 | @_modify_str_or_docstring | |
|
195 | def doctest_refactor_print(doc): | |
|
196 | """Refactor 'print x' statements in a doctest to print(x) style. 2to3 | |
|
197 | unfortunately doesn't pick up on our doctests. | |
|
198 | ||
|
199 | Can accept a string or a function, so it can be used as a decorator.""" | |
|
200 | return _print_statement_re.sub(_print_statement_sub, doc) | |
|
201 | ||
|
202 | # Abstract u'abc' syntax: | |
|
203 | @_modify_str_or_docstring | |
|
204 | def u_format(s): | |
|
205 | """"{u}'abc'" --> "'abc'" (Python 3) | |
|
206 | ||
|
207 | Accepts a string or a function, so it can be used as a decorator.""" | |
|
208 | return s.format(u='') | |
|
209 | ||
|
210 | def get_closure(f): | |
|
211 | """Get a function's closure attribute""" | |
|
212 | return f.__closure__ | |
|
213 | ||
|
214 | else: | |
|
215 | PY3 = False | |
|
216 | ||
|
217 | # keep reference to builtin_mod because the kernel overrides that value | |
|
218 | # to forward requests to a frontend. | |
|
219 | def input(prompt=''): | |
|
220 | return builtin_mod.raw_input(prompt) | |
|
221 | ||
|
222 | builtin_mod_name = "__builtin__" | |
|
223 | import __builtin__ as builtin_mod | |
|
224 | ||
|
225 | str_to_unicode = decode | |
|
226 | unicode_to_str = encode | |
|
227 | str_to_bytes = no_code | |
|
228 | bytes_to_str = no_code | |
|
229 | cast_bytes_py2 = cast_bytes | |
|
230 | cast_unicode_py2 = cast_unicode | |
|
231 | buffer_to_bytes_py2 = buffer_to_bytes | |
|
232 | ||
|
233 | string_types = (str, unicode) | |
|
234 | unicode_type = unicode | |
|
235 | ||
|
236 | import re | |
|
237 | _name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$") | |
|
238 | def isidentifier(s, dotted=False): | |
|
239 | if dotted: | |
|
240 | return all(isidentifier(a) for a in s.split(".")) | |
|
241 | return bool(_name_re.match(s)) | |
|
242 | ||
|
243 | xrange = xrange | |
|
244 | def iteritems(d): return d.iteritems() | |
|
245 | def itervalues(d): return d.itervalues() | |
|
246 | getcwd = os.getcwdu | |
|
247 | ||
|
248 | def MethodType(func, instance): | |
|
249 | return types.MethodType(func, instance, type(instance)) | |
|
250 | ||
|
251 | def doctest_refactor_print(func_or_str): | |
|
252 | return func_or_str | |
|
253 | ||
|
254 | def get_closure(f): | |
|
255 | """Get a function's closure attribute""" | |
|
256 | return f.func_closure | |
|
257 | ||
|
258 | which = _shutil_which | |
|
259 | ||
|
260 | # Abstract u'abc' syntax: | |
|
261 | @_modify_str_or_docstring | |
|
262 | def u_format(s): | |
|
263 | """"{u}'abc'" --> "u'abc'" (Python 2) | |
|
264 | ||
|
265 | Accepts a string or a function, so it can be used as a decorator.""" | |
|
266 | return s.format(u='u') | |
|
267 | ||
|
268 | if sys.platform == 'win32': | |
|
269 | def execfile(fname, glob=None, loc=None, compiler=None): | |
|
270 | loc = loc if (loc is not None) else glob | |
|
271 | scripttext = builtin_mod.open(fname).read()+ '\n' | |
|
272 | # compile converts unicode filename to str assuming | |
|
273 | # ascii. Let's do the conversion before calling compile | |
|
274 | if isinstance(fname, unicode): | |
|
275 | filename = unicode_to_str(fname) | |
|
276 | else: | |
|
277 | filename = fname | |
|
278 | compiler = compiler or compile | |
|
279 | exec(compiler(scripttext, filename, 'exec'), glob, loc) | |
|
280 | ||
|
281 | else: | |
|
282 | def execfile(fname, glob=None, loc=None, compiler=None): | |
|
283 | if isinstance(fname, unicode): | |
|
284 | filename = fname.encode(sys.getfilesystemencoding()) | |
|
285 | else: | |
|
286 | filename = fname | |
|
287 | where = [ns for ns in [glob, loc] if ns is not None] | |
|
288 | if compiler is None: | |
|
289 | builtin_mod.execfile(filename, *where) | |
|
290 | else: | |
|
291 | scripttext = builtin_mod.open(fname).read().rstrip() + '\n' | |
|
292 | exec(compiler(scripttext, filename, 'exec'), glob, loc) | |
|
146 | PY3 = True | |
|
147 | ||
|
148 | # keep reference to builtin_mod because the kernel overrides that value | |
|
149 | # to forward requests to a frontend. | |
|
150 | def input(prompt=''): | |
|
151 | return builtin_mod.input(prompt) | |
|
152 | ||
|
153 | builtin_mod_name = "builtins" | |
|
154 | import builtins as builtin_mod | |
|
155 | ||
|
156 | str_to_unicode = no_code | |
|
157 | unicode_to_str = no_code | |
|
158 | str_to_bytes = encode | |
|
159 | bytes_to_str = decode | |
|
160 | cast_bytes_py2 = no_code | |
|
161 | cast_unicode_py2 = no_code | |
|
162 | buffer_to_bytes_py2 = no_code | |
|
163 | ||
|
164 | string_types = (str,) | |
|
165 | unicode_type = str | |
|
166 | ||
|
167 | which = shutil.which | |
|
168 | ||
|
169 | def isidentifier(s, dotted=False): | |
|
170 | if dotted: | |
|
171 | return all(isidentifier(a) for a in s.split(".")) | |
|
172 | return s.isidentifier() | |
|
173 | ||
|
174 | xrange = range | |
|
175 | def iteritems(d): return iter(d.items()) | |
|
176 | def itervalues(d): return iter(d.values()) | |
|
177 | getcwd = os.getcwd | |
|
178 | ||
|
179 | MethodType = types.MethodType | |
|
180 | ||
|
181 | def execfile(fname, glob, loc=None, compiler=None): | |
|
182 | loc = loc if (loc is not None) else glob | |
|
183 | with open(fname, 'rb') as f: | |
|
184 | compiler = compiler or compile | |
|
185 | exec(compiler(f.read(), fname, 'exec'), glob, loc) | |
|
186 | ||
|
187 | # Refactor print statements in doctests. | |
|
188 | _print_statement_re = re.compile(r"\bprint (?P<expr>.*)$", re.MULTILINE) | |
|
189 | def _print_statement_sub(match): | |
|
190 | expr = match.groups('expr') | |
|
191 | return "print(%s)" % expr | |
|
192 | ||
|
193 | @_modify_str_or_docstring | |
|
194 | def doctest_refactor_print(doc): | |
|
195 | """Refactor 'print x' statements in a doctest to print(x) style. 2to3 | |
|
196 | unfortunately doesn't pick up on our doctests. | |
|
197 | ||
|
198 | Can accept a string or a function, so it can be used as a decorator.""" | |
|
199 | return _print_statement_re.sub(_print_statement_sub, doc) | |
|
200 | ||
|
201 | # Abstract u'abc' syntax: | |
|
202 | @_modify_str_or_docstring | |
|
203 | def u_format(s): | |
|
204 | """"{u}'abc'" --> "'abc'" (Python 3) | |
|
205 | ||
|
206 | Accepts a string or a function, so it can be used as a decorator.""" | |
|
207 | return s.format(u='') | |
|
208 | ||
|
209 | def get_closure(f): | |
|
210 | """Get a function's closure attribute""" | |
|
211 | return f.__closure__ | |
|
293 | 212 | |
|
294 | 213 | |
|
295 | 214 | PY2 = not PY3 |
|
296 | 215 | PYPY = platform.python_implementation() == "PyPy" |
|
297 | 216 | |
|
298 | 217 | |
|
299 | 218 | def annotate(**kwargs): |
|
300 | 219 | """Python 3 compatible function annotation for Python 2.""" |
|
301 | 220 | if not kwargs: |
|
302 | 221 | raise ValueError('annotations must be provided as keyword arguments') |
|
303 | 222 | def dec(f): |
|
304 | 223 | if hasattr(f, '__annotations__'): |
|
305 | 224 | for k, v in kwargs.items(): |
|
306 | 225 | f.__annotations__[k] = v |
|
307 | 226 | else: |
|
308 | 227 | f.__annotations__ = kwargs |
|
309 | 228 | return f |
|
310 | 229 | return dec |
|
311 | 230 | |
|
312 | 231 | |
|
313 | 232 | # Parts below taken from six: |
|
314 | 233 | # Copyright (c) 2010-2013 Benjamin Peterson |
|
315 | 234 | # |
|
316 | 235 | # Permission is hereby granted, free of charge, to any person obtaining a copy |
|
317 | 236 | # of this software and associated documentation files (the "Software"), to deal |
|
318 | 237 | # in the Software without restriction, including without limitation the rights |
|
319 | 238 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
|
320 | 239 | # copies of the Software, and to permit persons to whom the Software is |
|
321 | 240 | # furnished to do so, subject to the following conditions: |
|
322 | 241 | # |
|
323 | 242 | # The above copyright notice and this permission notice shall be included in all |
|
324 | 243 | # copies or substantial portions of the Software. |
|
325 | 244 | # |
|
326 | 245 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
|
327 | 246 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
|
328 | 247 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
|
329 | 248 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
|
330 | 249 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
|
331 | 250 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
|
332 | 251 | # SOFTWARE. |
|
333 | 252 | |
|
334 | 253 | def with_metaclass(meta, *bases): |
|
335 | 254 | """Create a base class with a metaclass.""" |
|
336 | 255 | return meta("_NewBase", bases, {}) |
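
For completeness, a small hedged sketch of the last two helpers in use (the class and function names below are invented for illustration):

    class Meta(type):
        pass

    class Base(object):
        pass

    # with_metaclass() builds a temporary base class whose metaclass is Meta,
    # so MyClass is created by Meta under both Python 2 and Python 3 syntax rules.
    class MyClass(with_metaclass(Meta, Base)):
        pass

    # annotate() attaches annotations without using Python-3-only syntax.
    @annotate(x=int, y=int)
    def add(x, y):
        return x + y

    assert type(MyClass) is Meta
    assert add.__annotations__ == {'x': int, 'y': int}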
@@ -1,595 +1,589 b'' | |||
|
1 | 1 | """Patched version of standard library tokenize, to deal with various bugs. |
|
2 | 2 | |
|
3 | 3 | Based on Python 3.2 code. |
|
4 | 4 | |
|
5 | 5 | Patches: |
|
6 | 6 | |
|
7 | 7 | - Gareth Rees' patch for Python issue #12691 (untokenizing) |
|
8 | 8 | - Except we don't encode the output of untokenize |
|
9 | 9 | - Python 2 compatible syntax, so that it can be byte-compiled at installation |
|
10 | 10 | - Newlines in comments and blank lines should be either NL or NEWLINE, depending |
|
11 | 11 | on whether they are in a multi-line statement. Filed as Python issue #17061. |
|
12 | 12 | - Export generate_tokens & TokenError |
|
13 | 13 | - u and rb literals are allowed under Python 3.3 and above. |
|
14 | 14 | |
|
15 | 15 | ------------------------------------------------------------------------------ |
|
16 | 16 | Tokenization help for Python programs. |
|
17 | 17 | |
|
18 | 18 | tokenize(readline) is a generator that breaks a stream of bytes into |
|
19 | 19 | Python tokens. It decodes the bytes according to PEP-0263 for |
|
20 | 20 | determining source file encoding. |
|
21 | 21 | |
|
22 | 22 | It accepts a readline-like method which is called repeatedly to get the |
|
23 | 23 | next line of input (or b"" for EOF). It generates 5-tuples with these |
|
24 | 24 | members: |
|
25 | 25 | |
|
26 | 26 | the token type (see token.py) |
|
27 | 27 | the token (a string) |
|
28 | 28 | the starting (row, column) indices of the token (a 2-tuple of ints) |
|
29 | 29 | the ending (row, column) indices of the token (a 2-tuple of ints) |
|
30 | 30 | the original line (string) |
|
31 | 31 | |
|
32 | 32 | It is designed to match the working of the Python tokenizer exactly, except |
|
33 | 33 | that it produces COMMENT tokens for comments and gives type OP for all |
|
34 | 34 | operators. Additionally, all token lists start with an ENCODING token |
|
35 | 35 | which tells you which encoding was used to decode the bytes stream. |
|
36 | 36 | """ |
|
37 | 37 | |
|
38 | 38 | __author__ = 'Ka-Ping Yee <ping@lfw.org>' |
|
39 | 39 | __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' |
|
40 | 40 | 'Skip Montanaro, Raymond Hettinger, Trent Nelson, ' |
|
41 | 41 | 'Michael Foord') |
|
42 | 42 | import builtins |
|
43 | 43 | import re |
|
44 | 44 | import sys |
|
45 | 45 | from token import * |
|
46 | 46 | from codecs import lookup, BOM_UTF8 |
|
47 | 47 | import collections |
|
48 | 48 | from io import TextIOWrapper |
|
49 | 49 | cookie_re = re.compile("coding[:=]\s*([-\w.]+)") |
|
50 | 50 | |
|
51 | 51 | import token |
|
52 | 52 | __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding", |
|
53 | 53 | "NL", "untokenize", "ENCODING", "TokenInfo"] |
|
54 | 54 | del token |
|
55 | 55 | |
|
56 | 56 | __all__ += ["generate_tokens", "TokenError"] |
|
57 | 57 | |
|
58 | 58 | COMMENT = N_TOKENS |
|
59 | 59 | tok_name[COMMENT] = 'COMMENT' |
|
60 | 60 | NL = N_TOKENS + 1 |
|
61 | 61 | tok_name[NL] = 'NL' |
|
62 | 62 | ENCODING = N_TOKENS + 2 |
|
63 | 63 | tok_name[ENCODING] = 'ENCODING' |
|
64 | 64 | N_TOKENS += 3 |
|
65 | 65 | |
|
66 | 66 | class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')): |
|
67 | 67 | def __repr__(self): |
|
68 | 68 | annotated_type = '%d (%s)' % (self.type, tok_name[self.type]) |
|
69 | 69 | return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' % |
|
70 | 70 | self._replace(type=annotated_type)) |
|
71 | 71 | |
|
72 | 72 | def group(*choices): return '(' + '|'.join(choices) + ')' |
|
73 | 73 | def any(*choices): return group(*choices) + '*' |
|
74 | 74 | def maybe(*choices): return group(*choices) + '?' |
|
75 | 75 | |
|
76 | 76 | # Note: we use unicode matching for names ("\w") but ascii matching for |
|
77 | 77 | # number literals. |
|
78 | 78 | Whitespace = r'[ \f\t]*' |
|
79 | 79 | Comment = r'#[^\r\n]*' |
|
80 | 80 | Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) |
|
81 | 81 | Name = r'\w+' |
|
82 | 82 | |
|
83 | 83 | Hexnumber = r'0[xX][0-9a-fA-F]+' |
|
84 | 84 | Binnumber = r'0[bB][01]+' |
|
85 | 85 | Octnumber = r'0[oO][0-7]+' |
|
86 | 86 | Decnumber = r'(?:0+|[1-9][0-9]*)' |
|
87 | 87 | Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) |
|
88 | 88 | Exponent = r'[eE][-+]?[0-9]+' |
|
89 | 89 | Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent) |
|
90 | 90 | Expfloat = r'[0-9]+' + Exponent |
|
91 | 91 | Floatnumber = group(Pointfloat, Expfloat) |
|
92 | 92 | Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]') |
|
93 | 93 | Number = group(Imagnumber, Floatnumber, Intnumber) |
|
94 | ||
|
95 | if sys.version_info.minor >= 3: | |
|
96 | StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?' | |
|
97 | else: | |
|
98 | StringPrefix = r'(?:[bB]?[rR]?)?' | |
|
94 | StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?' | |
|
99 | 95 | |
|
100 | 96 | # Tail end of ' string. |
|
101 | 97 | Single = r"[^'\\]*(?:\\.[^'\\]*)*'" |
|
102 | 98 | # Tail end of " string. |
|
103 | 99 | Double = r'[^"\\]*(?:\\.[^"\\]*)*"' |
|
104 | 100 | # Tail end of ''' string. |
|
105 | 101 | Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" |
|
106 | 102 | # Tail end of """ string. |
|
107 | 103 | Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' |
|
108 | 104 | Triple = group(StringPrefix + "'''", StringPrefix + '"""') |
|
109 | 105 | # Single-line ' or " string. |
|
110 | 106 | String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", |
|
111 | 107 | StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"') |
|
112 | 108 | |
|
113 | 109 | # Because of leftmost-then-longest match semantics, be sure to put the |
|
114 | 110 | # longest operators first (e.g., if = came before ==, == would get |
|
115 | 111 | # recognized as two instances of =). |
|
116 | 112 | Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", |
|
117 | 113 | r"//=?", r"->", |
|
118 | 114 | r"[+\-*/%&|^=<>]=?", |
|
119 | 115 | r"~") |
|
120 | 116 | |
|
121 | 117 | Bracket = '[][(){}]' |
|
122 | 118 | Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]') |
|
123 | 119 | Funny = group(Operator, Bracket, Special) |
|
124 | 120 | |
|
125 | 121 | PlainToken = group(Number, Funny, String, Name) |
|
126 | 122 | Token = Ignore + PlainToken |
|
127 | 123 | |
|
128 | 124 | # First (or only) line of ' or " string. |
|
129 | 125 | ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + |
|
130 | 126 | group("'", r'\\\r?\n'), |
|
131 | 127 | StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + |
|
132 | 128 | group('"', r'\\\r?\n')) |
|
133 | 129 | PseudoExtras = group(r'\\\r?\n', Comment, Triple) |
|
134 | 130 | PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) |
|
135 | 131 | |
|
136 | 132 | def _compile(expr): |
|
137 | 133 | return re.compile(expr, re.UNICODE) |
|
138 | 134 | |
|
139 | 135 | tokenprog, pseudoprog, single3prog, double3prog = map( |
|
140 | 136 | _compile, (Token, PseudoToken, Single3, Double3)) |
|
141 | 137 | endprogs = {"'": _compile(Single), '"': _compile(Double), |
|
142 | 138 | "'''": single3prog, '"""': double3prog, |
|
143 | 139 | "r'''": single3prog, 'r"""': double3prog, |
|
144 | 140 | "b'''": single3prog, 'b"""': double3prog, |
|
145 | 141 | "R'''": single3prog, 'R"""': double3prog, |
|
146 | 142 | "B'''": single3prog, 'B"""': double3prog, |
|
147 | 143 | "br'''": single3prog, 'br"""': double3prog, |
|
148 | 144 | "bR'''": single3prog, 'bR"""': double3prog, |
|
149 | 145 | "Br'''": single3prog, 'Br"""': double3prog, |
|
150 | 146 | "BR'''": single3prog, 'BR"""': double3prog, |
|
151 | 147 | 'r': None, 'R': None, 'b': None, 'B': None} |
|
152 | 148 | |
|
153 | 149 | triple_quoted = {} |
|
154 | 150 | for t in ("'''", '"""', |
|
155 | 151 | "r'''", 'r"""', "R'''", 'R"""', |
|
156 | 152 | "b'''", 'b"""', "B'''", 'B"""', |
|
157 | 153 | "br'''", 'br"""', "Br'''", 'Br"""', |
|
158 | 154 | "bR'''", 'bR"""', "BR'''", 'BR"""'): |
|
159 | 155 | triple_quoted[t] = t |
|
160 | 156 | single_quoted = {} |
|
161 | 157 | for t in ("'", '"', |
|
162 | 158 | "r'", 'r"', "R'", 'R"', |
|
163 | 159 | "b'", 'b"', "B'", 'B"', |
|
164 | 160 | "br'", 'br"', "Br'", 'Br"', |
|
165 | 161 | "bR'", 'bR"', "BR'", 'BR"' ): |
|
166 | 162 | single_quoted[t] = t |
|
167 | 163 | |
|
168 | if sys.version_info.minor >= 3: | |
|
169 | # Python 3.3 | |
|
170 | for _prefix in ['rb', 'rB', 'Rb', 'RB', 'u', 'U']: | |
|
171 | _t2 = _prefix+'"""' | |
|
172 | endprogs[_t2] = double3prog | |
|
173 | triple_quoted[_t2] = _t2 | |
|
174 | _t1 = _prefix + "'''" | |
|
175 | endprogs[_t1] = single3prog | |
|
176 | triple_quoted[_t1] = _t1 | |
|
177 | single_quoted[_prefix+'"'] = _prefix+'"' | |
|
178 | single_quoted[_prefix+"'"] = _prefix+"'" | |
|
179 | del _prefix, _t2, _t1 | |
|
180 | endprogs['u'] = None | |
|
181 | endprogs['U'] = None | |
|
164 | for _prefix in ['rb', 'rB', 'Rb', 'RB', 'u', 'U']: | |
|
165 | _t2 = _prefix+'"""' | |
|
166 | endprogs[_t2] = double3prog | |
|
167 | triple_quoted[_t2] = _t2 | |
|
168 | _t1 = _prefix + "'''" | |
|
169 | endprogs[_t1] = single3prog | |
|
170 | triple_quoted[_t1] = _t1 | |
|
171 | single_quoted[_prefix+'"'] = _prefix+'"' | |
|
172 | single_quoted[_prefix+"'"] = _prefix+"'" | |
|
173 | del _prefix, _t2, _t1 | |
|
174 | endprogs['u'] = None | |
|
175 | endprogs['U'] = None | |
|
182 | 176 | |
|
183 | 177 | del _compile |
|
184 | 178 | |
|
185 | 179 | tabsize = 8 |
|
186 | 180 | |
|
187 | 181 | class TokenError(Exception): pass |
|
188 | 182 | |
|
189 | 183 | class StopTokenizing(Exception): pass |
|
190 | 184 | |
|
191 | 185 | |
|
192 | 186 | class Untokenizer: |
|
193 | 187 | |
|
194 | 188 | def __init__(self): |
|
195 | 189 | self.tokens = [] |
|
196 | 190 | self.prev_row = 1 |
|
197 | 191 | self.prev_col = 0 |
|
198 | 192 | self.encoding = 'utf-8' |
|
199 | 193 | |
|
200 | 194 | def add_whitespace(self, tok_type, start): |
|
201 | 195 | row, col = start |
|
202 | 196 | assert row >= self.prev_row |
|
203 | 197 | col_offset = col - self.prev_col |
|
204 | 198 | if col_offset > 0: |
|
205 | 199 | self.tokens.append(" " * col_offset) |
|
206 | 200 | elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER): |
|
207 | 201 | # Line was backslash-continued. |
|
208 | 202 | self.tokens.append(" ") |
|
209 | 203 | |
|
210 | 204 | def untokenize(self, tokens): |
|
211 | 205 | iterable = iter(tokens) |
|
212 | 206 | for t in iterable: |
|
213 | 207 | if len(t) == 2: |
|
214 | 208 | self.compat(t, iterable) |
|
215 | 209 | break |
|
216 | 210 | tok_type, token, start, end = t[:4] |
|
217 | 211 | if tok_type == ENCODING: |
|
218 | 212 | self.encoding = token |
|
219 | 213 | continue |
|
220 | 214 | self.add_whitespace(tok_type, start) |
|
221 | 215 | self.tokens.append(token) |
|
222 | 216 | self.prev_row, self.prev_col = end |
|
223 | 217 | if tok_type in (NEWLINE, NL): |
|
224 | 218 | self.prev_row += 1 |
|
225 | 219 | self.prev_col = 0 |
|
226 | 220 | return "".join(self.tokens) |
|
227 | 221 | |
|
228 | 222 | def compat(self, token, iterable): |
|
229 | 223 | # This import is here to avoid problems when the itertools |
|
230 | 224 | # module is not built yet and tokenize is imported. |
|
231 | 225 | from itertools import chain |
|
232 | 226 | startline = False |
|
233 | 227 | prevstring = False |
|
234 | 228 | indents = [] |
|
235 | 229 | toks_append = self.tokens.append |
|
236 | 230 | |
|
237 | 231 | for tok in chain([token], iterable): |
|
238 | 232 | toknum, tokval = tok[:2] |
|
239 | 233 | if toknum == ENCODING: |
|
240 | 234 | self.encoding = tokval |
|
241 | 235 | continue |
|
242 | 236 | |
|
243 | 237 | if toknum in (NAME, NUMBER): |
|
244 | 238 | tokval += ' ' |
|
245 | 239 | |
|
246 | 240 | # Insert a space between two consecutive strings |
|
247 | 241 | if toknum == STRING: |
|
248 | 242 | if prevstring: |
|
249 | 243 | tokval = ' ' + tokval |
|
250 | 244 | prevstring = True |
|
251 | 245 | else: |
|
252 | 246 | prevstring = False |
|
253 | 247 | |
|
254 | 248 | if toknum == INDENT: |
|
255 | 249 | indents.append(tokval) |
|
256 | 250 | continue |
|
257 | 251 | elif toknum == DEDENT: |
|
258 | 252 | indents.pop() |
|
259 | 253 | continue |
|
260 | 254 | elif toknum in (NEWLINE, NL): |
|
261 | 255 | startline = True |
|
262 | 256 | elif startline and indents: |
|
263 | 257 | toks_append(indents[-1]) |
|
264 | 258 | startline = False |
|
265 | 259 | toks_append(tokval) |
|
266 | 260 | |
|
267 | 261 | |
|
268 | 262 | def untokenize(tokens): |
|
269 | 263 | """ |
|
270 | 264 | Convert ``tokens`` (an iterable) back into Python source code. Return |

271 | 265 | a string of source text. (Unlike the standard library version, the result |

272 | 266 | is not encoded to bytes; any ENCODING token in ``tokens`` is recorded but not applied.) |
|
273 | 267 | |
|
274 | 268 | The result is guaranteed to tokenize back to match the input so that |
|
275 | 269 | the conversion is lossless and round-trips are assured. The |
|
276 | 270 | guarantee applies only to the token type and token string as the |
|
277 | 271 | spacing between tokens (column positions) may change. |
|
278 | 272 | |
|
279 | 273 | :func:`untokenize` has two modes. If the input tokens are sequences |
|
280 | 274 | of length 2 (``type``, ``string``) then spaces are added as necessary to |
|
281 | 275 | preserve the round-trip property. |
|
282 | 276 | |
|
283 | 277 | If the input tokens are sequences of length 4 or more (``type``, |
|
284 | 278 | ``string``, ``start``, ``end``), as returned by :func:`tokenize`, then |
|
285 | 279 | spaces are added so that each token appears in the result at the |
|
286 | 280 | position indicated by ``start`` and ``end``, if possible. |
|
287 | 281 | """ |
|
288 | 282 | return Untokenizer().untokenize(tokens) |
|
289 | 283 | |
|
290 | 284 | |
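
A tiny hedged example of the length-2 ("compat") mode described above: token type/string pairs are enough to regenerate equivalent source, with spacing recreated rather than preserved.

    # NAME, OP, NUMBER, NEWLINE and ENDMARKER come from the `token` module,
    # which is star-imported at the top of this file.
    pairs = [(NAME, 'spam'), (OP, '='), (NUMBER, '42'), (NEWLINE, '\n'), (ENDMARKER, '')]
    print(repr(untokenize(pairs)))   # -> 'spam =42 \n', which tokenizes back to the same tokens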
|
291 | 285 | def _get_normal_name(orig_enc): |
|
292 | 286 | """Imitates get_normal_name in tokenizer.c.""" |
|
293 | 287 | # Only care about the first 12 characters. |
|
294 | 288 | enc = orig_enc[:12].lower().replace("_", "-") |
|
295 | 289 | if enc == "utf-8" or enc.startswith("utf-8-"): |
|
296 | 290 | return "utf-8" |
|
297 | 291 | if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ |
|
298 | 292 | enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): |
|
299 | 293 | return "iso-8859-1" |
|
300 | 294 | return orig_enc |
|
301 | 295 | |
|
302 | 296 | def detect_encoding(readline): |
|
303 | 297 | """ |
|
304 | 298 | The detect_encoding() function is used to detect the encoding that should |
|
305 | 299 | be used to decode a Python source file. It requires one argument, readline, |
|
306 | 300 | in the same way as the tokenize() generator. |
|
307 | 301 | |
|
308 | 302 | It will call readline a maximum of twice, and return the encoding used |
|
309 | 303 | (as a string) and a list of any lines (left as bytes) it has read in. |
|
310 | 304 | |
|
311 | 305 | It detects the encoding from the presence of a utf-8 bom or an encoding |
|
312 | 306 | cookie as specified in pep-0263. If both a bom and a cookie are present, |
|
313 | 307 | but disagree, a SyntaxError will be raised. If the encoding cookie is an |
|
314 | 308 | invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, |
|
315 | 309 | 'utf-8-sig' is returned. |
|
316 | 310 | |
|
317 | 311 | If no encoding is specified, then the default of 'utf-8' will be returned. |
|
318 | 312 | """ |
|
319 | 313 | bom_found = False |
|
320 | 314 | encoding = None |
|
321 | 315 | default = 'utf-8' |
|
322 | 316 | def read_or_stop(): |
|
323 | 317 | try: |
|
324 | 318 | return readline() |
|
325 | 319 | except StopIteration: |
|
326 | 320 | return b'' |
|
327 | 321 | |
|
328 | 322 | def find_cookie(line): |
|
329 | 323 | try: |
|
330 | 324 | # Decode as UTF-8. Either the line is an encoding declaration, |
|
331 | 325 | # in which case it should be pure ASCII, or it must be UTF-8 |
|
332 | 326 | # per default encoding. |
|
333 | 327 | line_string = line.decode('utf-8') |
|
334 | 328 | except UnicodeDecodeError: |
|
335 | 329 | raise SyntaxError("invalid or missing encoding declaration") |
|
336 | 330 | |
|
337 | 331 | matches = cookie_re.findall(line_string) |
|
338 | 332 | if not matches: |
|
339 | 333 | return None |
|
340 | 334 | encoding = _get_normal_name(matches[0]) |
|
341 | 335 | try: |
|
342 | 336 | codec = lookup(encoding) |
|
343 | 337 | except LookupError: |
|
344 | 338 | # This behaviour mimics the Python interpreter |
|
345 | 339 | raise SyntaxError("unknown encoding: " + encoding) |
|
346 | 340 | |
|
347 | 341 | if bom_found: |
|
348 | 342 | if encoding != 'utf-8': |
|
349 | 343 | # This behaviour mimics the Python interpreter |
|
350 | 344 | raise SyntaxError('encoding problem: utf-8') |
|
351 | 345 | encoding += '-sig' |
|
352 | 346 | return encoding |
|
353 | 347 | |
|
354 | 348 | first = read_or_stop() |
|
355 | 349 | if first.startswith(BOM_UTF8): |
|
356 | 350 | bom_found = True |
|
357 | 351 | first = first[3:] |
|
358 | 352 | default = 'utf-8-sig' |
|
359 | 353 | if not first: |
|
360 | 354 | return default, [] |
|
361 | 355 | |
|
362 | 356 | encoding = find_cookie(first) |
|
363 | 357 | if encoding: |
|
364 | 358 | return encoding, [first] |
|
365 | 359 | |
|
366 | 360 | second = read_or_stop() |
|
367 | 361 | if not second: |
|
368 | 362 | return default, [first] |
|
369 | 363 | |
|
370 | 364 | encoding = find_cookie(second) |
|
371 | 365 | if encoding: |
|
372 | 366 | return encoding, [first, second] |
|
373 | 367 | |
|
374 | 368 | return default, [first, second] |
|
375 | 369 | |
|
376 | 370 | |
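
A small worked example of the behaviour described in the docstring above (a sketch; io.BytesIO stands in for a real file's readline):

    import io

    src = b"# -*- coding: latin-1 -*-\nprint('hi')\n"
    encoding, lines = detect_encoding(io.BytesIO(src).readline)
    print(encoding)   # 'iso-8859-1', normalised from the 'latin-1' cookie
    print(lines)      # the raw line(s) consumed during detection, here just the cookie line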
|
377 | 371 | def open(filename): |
|
378 | 372 | """Open a file in read only mode using the encoding detected by |
|
379 | 373 | detect_encoding(). |
|
380 | 374 | """ |
|
381 | 375 | buffer = builtins.open(filename, 'rb') |
|
382 | 376 | encoding, lines = detect_encoding(buffer.readline) |
|
383 | 377 | buffer.seek(0) |
|
384 | 378 | text = TextIOWrapper(buffer, encoding, line_buffering=True) |
|
385 | 379 | text.mode = 'r' |
|
386 | 380 | return text |
|
387 | 381 | |
|
388 | 382 | |
|
389 | 383 | def tokenize(readline): |
|
390 | 384 | """ |
|
391 | 385 | The tokenize() generator requires one argument, readline, which |
|
392 | 386 | must be a callable object which provides the same interface as the |
|
393 | 387 | readline() method of built-in file objects. Each call to the function |
|
394 | 388 | should return one line of input as bytes. Alternately, readline |
|
395 | 389 | can be a callable function terminating with :class:`StopIteration`:: |
|
396 | 390 | |
|
397 | 391 | readline = open(myfile, 'rb').__next__ # Example of alternate readline |
|
398 | 392 | |
|
399 | 393 | The generator produces 5-tuples with these members: the token type; the |
|
400 | 394 | token string; a 2-tuple (srow, scol) of ints specifying the row and |
|
401 | 395 | column where the token begins in the source; a 2-tuple (erow, ecol) of |
|
402 | 396 | ints specifying the row and column where the token ends in the source; |
|
403 | 397 | and the line on which the token was found. The line passed is the |
|
404 | 398 | logical line; continuation lines are included. |
|
405 | 399 | |
|
406 | 400 | The first token sequence will always be an ENCODING token |
|
407 | 401 | which tells you which encoding was used to decode the bytes stream. |
|
408 | 402 | """ |
|
409 | 403 | # This import is here to avoid problems when the itertools module is not |
|
410 | 404 | # built yet and tokenize is imported. |
|
411 | 405 | from itertools import chain, repeat |
|
412 | 406 | encoding, consumed = detect_encoding(readline) |
|
413 | 407 | rl_gen = iter(readline, b"") |
|
414 | 408 | empty = repeat(b"") |
|
415 | 409 | return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding) |
|
416 | 410 | |
|
417 | 411 | |
|
418 | 412 | def _tokenize(readline, encoding): |
|
419 | 413 | lnum = parenlev = continued = 0 |
|
420 | 414 | numchars = '0123456789' |
|
421 | 415 | contstr, needcont = '', 0 |
|
422 | 416 | contline = None |
|
423 | 417 | indents = [0] |
|
424 | 418 | |
|
425 | 419 | if encoding is not None: |
|
426 | 420 | if encoding == "utf-8-sig": |
|
427 | 421 | # BOM will already have been stripped. |
|
428 | 422 | encoding = "utf-8" |
|
429 | 423 | yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '') |
|
430 | 424 | while True: # loop over lines in stream |
|
431 | 425 | try: |
|
432 | 426 | line = readline() |
|
433 | 427 | except StopIteration: |
|
434 | 428 | line = b'' |
|
435 | 429 | |
|
436 | 430 | if encoding is not None: |
|
437 | 431 | line = line.decode(encoding) |
|
438 | 432 | lnum += 1 |
|
439 | 433 | pos, max = 0, len(line) |
|
440 | 434 | |
|
441 | 435 | if contstr: # continued string |
|
442 | 436 | if not line: |
|
443 | 437 | raise TokenError("EOF in multi-line string", strstart) |
|
444 | 438 | endmatch = endprog.match(line) |
|
445 | 439 | if endmatch: |
|
446 | 440 | pos = end = endmatch.end(0) |
|
447 | 441 | yield TokenInfo(STRING, contstr + line[:end], |
|
448 | 442 | strstart, (lnum, end), contline + line) |
|
449 | 443 | contstr, needcont = '', 0 |
|
450 | 444 | contline = None |
|
451 | 445 | elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': |
|
452 | 446 | yield TokenInfo(ERRORTOKEN, contstr + line, |
|
453 | 447 | strstart, (lnum, len(line)), contline) |
|
454 | 448 | contstr = '' |
|
455 | 449 | contline = None |
|
456 | 450 | continue |
|
457 | 451 | else: |
|
458 | 452 | contstr = contstr + line |
|
459 | 453 | contline = contline + line |
|
460 | 454 | continue |
|
461 | 455 | |
|
462 | 456 | elif parenlev == 0 and not continued: # new statement |
|
463 | 457 | if not line: break |
|
464 | 458 | column = 0 |
|
465 | 459 | while pos < max: # measure leading whitespace |
|
466 | 460 | if line[pos] == ' ': |
|
467 | 461 | column += 1 |
|
468 | 462 | elif line[pos] == '\t': |
|
469 | 463 | column = (column//tabsize + 1)*tabsize |
|
470 | 464 | elif line[pos] == '\f': |
|
471 | 465 | column = 0 |
|
472 | 466 | else: |
|
473 | 467 | break |
|
474 | 468 | pos += 1 |
|
475 | 469 | if pos == max: |
|
476 | 470 | break |
|
477 | 471 | |
|
478 | 472 | if line[pos] in '#\r\n': # skip comments or blank lines |
|
479 | 473 | if line[pos] == '#': |
|
480 | 474 | comment_token = line[pos:].rstrip('\r\n') |
|
481 | 475 | nl_pos = pos + len(comment_token) |
|
482 | 476 | yield TokenInfo(COMMENT, comment_token, |
|
483 | 477 | (lnum, pos), (lnum, pos + len(comment_token)), line) |
|
484 | 478 | yield TokenInfo(NEWLINE, line[nl_pos:], |
|
485 | 479 | (lnum, nl_pos), (lnum, len(line)), line) |
|
486 | 480 | else: |
|
487 | 481 | yield TokenInfo(NEWLINE, line[pos:], |
|
488 | 482 | (lnum, pos), (lnum, len(line)), line) |
|
489 | 483 | continue |
|
490 | 484 | |
|
491 | 485 | if column > indents[-1]: # count indents or dedents |
|
492 | 486 | indents.append(column) |
|
493 | 487 | yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line) |
|
494 | 488 | while column < indents[-1]: |
|
495 | 489 | if column not in indents: |
|
496 | 490 | raise IndentationError( |
|
497 | 491 | "unindent does not match any outer indentation level", |
|
498 | 492 | ("<tokenize>", lnum, pos, line)) |
|
499 | 493 | indents = indents[:-1] |
|
500 | 494 | yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line) |
|
501 | 495 | |
|
502 | 496 | else: # continued statement |
|
503 | 497 | if not line: |
|
504 | 498 | raise TokenError("EOF in multi-line statement", (lnum, 0)) |
|
505 | 499 | continued = 0 |
|
506 | 500 | |
|
507 | 501 | while pos < max: |
|
508 | 502 | pseudomatch = pseudoprog.match(line, pos) |
|
509 | 503 | if pseudomatch: # scan for tokens |
|
510 | 504 | start, end = pseudomatch.span(1) |
|
511 | 505 | spos, epos, pos = (lnum, start), (lnum, end), end |
|
512 | 506 | token, initial = line[start:end], line[start] |
|
513 | 507 | |
|
514 | 508 | if (initial in numchars or # ordinary number |
|
515 | 509 | (initial == '.' and token != '.' and token != '...')): |
|
516 | 510 | yield TokenInfo(NUMBER, token, spos, epos, line) |
|
517 | 511 | elif initial in '\r\n': |
|
518 | 512 | yield TokenInfo(NL if parenlev > 0 else NEWLINE, |
|
519 | 513 | token, spos, epos, line) |
|
520 | 514 | elif initial == '#': |
|
521 | 515 | assert not token.endswith("\n") |
|
522 | 516 | yield TokenInfo(COMMENT, token, spos, epos, line) |
|
523 | 517 | elif token in triple_quoted: |
|
524 | 518 | endprog = endprogs[token] |
|
525 | 519 | endmatch = endprog.match(line, pos) |
|
526 | 520 | if endmatch: # all on one line |
|
527 | 521 | pos = endmatch.end(0) |
|
528 | 522 | token = line[start:pos] |
|
529 | 523 | yield TokenInfo(STRING, token, spos, (lnum, pos), line) |
|
530 | 524 | else: |
|
531 | 525 | strstart = (lnum, start) # multiple lines |
|
532 | 526 | contstr = line[start:] |
|
533 | 527 | contline = line |
|
534 | 528 | break |
|
535 | 529 | elif initial in single_quoted or \ |
|
536 | 530 | token[:2] in single_quoted or \ |
|
537 | 531 | token[:3] in single_quoted: |
|
538 | 532 | if token[-1] == '\n': # continued string |
|
539 | 533 | strstart = (lnum, start) |
|
540 | 534 | endprog = (endprogs[initial] or endprogs[token[1]] or |
|
541 | 535 | endprogs[token[2]]) |
|
542 | 536 | contstr, needcont = line[start:], 1 |
|
543 | 537 | contline = line |
|
544 | 538 | break |
|
545 | 539 | else: # ordinary string |
|
546 | 540 | yield TokenInfo(STRING, token, spos, epos, line) |
|
547 | 541 | elif initial.isidentifier(): # ordinary name |
|
548 | 542 | yield TokenInfo(NAME, token, spos, epos, line) |
|
549 | 543 | elif initial == '\\': # continued stmt |
|
550 | 544 | continued = 1 |
|
551 | 545 | else: |
|
552 | 546 | if initial in '([{': |
|
553 | 547 | parenlev += 1 |
|
554 | 548 | elif initial in ')]}': |
|
555 | 549 | parenlev -= 1 |
|
556 | 550 | yield TokenInfo(OP, token, spos, epos, line) |
|
557 | 551 | else: |
|
558 | 552 | yield TokenInfo(ERRORTOKEN, line[pos], |
|
559 | 553 | (lnum, pos), (lnum, pos+1), line) |
|
560 | 554 | pos += 1 |
|
561 | 555 | |
|
562 | 556 | for indent in indents[1:]: # pop remaining indent levels |
|
563 | 557 | yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '') |
|
564 | 558 | yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '') |
|
565 | 559 | |
|
566 | 560 | |
|
567 | 561 | # An undocumented, backwards compatible, API for all the places in the standard |
|
568 | 562 | # library that expect to be able to use tokenize with strings |
|
569 | 563 | def generate_tokens(readline): |
|
570 | 564 | return _tokenize(readline, None) |
|
571 | 565 | |
|
572 | 566 | if __name__ == "__main__": |
|
573 | 567 | # Quick sanity check |
|
574 | 568 | s = b'''def parseline(self, line): |
|
575 | 569 | """Parse the line into a command name and a string containing |
|
576 | 570 | the arguments. Returns a tuple containing (command, args, line). |
|
577 | 571 | 'command' and 'args' may be None if the line couldn't be parsed. |
|
578 | 572 | """ |
|
579 | 573 | line = line.strip() |
|
580 | 574 | if not line: |
|
581 | 575 | return None, None, line |
|
582 | 576 | elif line[0] == '?': |
|
583 | 577 | line = 'help ' + line[1:] |
|
584 | 578 | elif line[0] == '!': |
|
585 | 579 | if hasattr(self, 'do_shell'): |
|
586 | 580 | line = 'shell ' + line[1:] |
|
587 | 581 | else: |
|
588 | 582 | return None, None, line |
|
589 | 583 | i, n = 0, len(line) |
|
590 | 584 | while i < n and line[i] in self.identchars: i = i+1 |
|
591 | 585 | cmd, arg = line[:i], line[i:].strip() |
|
592 | 586 | return cmd, arg, line |
|
593 | 587 | ''' |
|
594 | 588 | for tok in tokenize(iter(s.splitlines()).__next__): |
|
595 | 589 | print(tok) |
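
The readline-based API documented above matches the standard library's tokenize module, so a minimal usage sketch can be written against the stdlib names (detect_encoding, tokenize); the in-memory source below is purely illustrative:

    import io
    import tokenize

    source = b"# -*- coding: utf-8 -*-\nx = 1\n"

    # detect_encoding() reads at most two lines looking for a BOM or a
    # PEP 263 coding cookie and returns (encoding, consumed_lines).
    encoding, consumed = tokenize.detect_encoding(io.BytesIO(source).readline)
    print(encoding)  # -> 'utf-8'

    # tokenize() yields TokenInfo 5-tuples; the first token is always ENCODING.
    for tok in tokenize.tokenize(io.BytesIO(source).readline):
        print(tok.type, tok.string)
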
@@ -1,468 +1,462 b'' | |||
|
1 | 1 | # encoding: utf-8 |
|
2 | 2 | """ |
|
3 | 3 | This module defines the things that are used in setup.py for building IPython |
|
4 | 4 | |
|
5 | 5 | This includes: |
|
6 | 6 | |
|
7 | 7 | * The basic arguments to setup |
|
8 | 8 | * Functions for finding things like packages, package data, etc. |
|
9 | 9 | * A function for checking dependencies. |
|
10 | 10 | """ |
|
11 | 11 | |
|
12 | 12 | # Copyright (c) IPython Development Team. |
|
13 | 13 | # Distributed under the terms of the Modified BSD License. |
|
14 | 14 | |
|
15 | 15 | |
|
16 | 16 | import re |
|
17 | 17 | import os |
|
18 | 18 | import sys |
|
19 | 19 | |
|
20 | 20 | from distutils import log |
|
21 | 21 | from distutils.command.build_py import build_py |
|
22 | 22 | from distutils.command.build_scripts import build_scripts |
|
23 | 23 | from distutils.command.install import install |
|
24 | 24 | from distutils.command.install_scripts import install_scripts |
|
25 | 25 | from distutils.cmd import Command |
|
26 | 26 | from glob import glob |
|
27 | 27 | |
|
28 | 28 | from setupext import install_data_ext |
|
29 | 29 | |
|
30 | 30 | #------------------------------------------------------------------------------- |
|
31 | 31 | # Useful globals and utility functions |
|
32 | 32 | #------------------------------------------------------------------------------- |
|
33 | 33 | |
|
34 | 34 | # A few handy globals |
|
35 | 35 | isfile = os.path.isfile |
|
36 | 36 | pjoin = os.path.join |
|
37 | 37 | repo_root = os.path.dirname(os.path.abspath(__file__)) |
|
38 | 38 | |
|
39 | 39 | def oscmd(s): |
|
40 | 40 | print(">", s) |
|
41 | 41 | os.system(s) |
|
42 | 42 | |
|
43 | # Py3 compatibility hacks, without assuming IPython itself is installed with | |
|
44 | # the full py3compat machinery. | |
|
45 | ||
|
46 | try: | |
|
47 | execfile | |
|
48 | except NameError: | |
|
49 | def execfile(fname, globs, locs=None): | |
|
50 | locs = locs or globs | |
|
51 | exec(compile(open(fname).read(), fname, "exec"), globs, locs) | |
|
43 | def execfile(fname, globs, locs=None): | |
|
44 | locs = locs or globs | |
|
45 | exec(compile(open(fname).read(), fname, "exec"), globs, locs) | |
|
52 | 46 | |
|
53 | 47 | # A little utility we'll need below, since glob() does NOT allow you to do |
|
54 | 48 | # exclusion on multiple endings! |
|
55 | 49 | def file_doesnt_endwith(test,endings): |
|
56 | 50 | """Return true if test is a file and its name does NOT end with any |
|
57 | 51 | of the strings listed in endings.""" |
|
58 | 52 | if not isfile(test): |
|
59 | 53 | return False |
|
60 | 54 | for e in endings: |
|
61 | 55 | if test.endswith(e): |
|
62 | 56 | return False |
|
63 | 57 | return True |
|
64 | 58 | |
|
65 | 59 | #--------------------------------------------------------------------------- |
|
66 | 60 | # Basic project information |
|
67 | 61 | #--------------------------------------------------------------------------- |
|
68 | 62 | |
|
69 | 63 | # release.py contains version, authors, license, url, keywords, etc. |
|
70 | 64 | execfile(pjoin(repo_root, 'IPython','core','release.py'), globals()) |
|
71 | 65 | |
|
72 | 66 | # Create a dict with the basic information |
|
73 | 67 | # This dict is eventually passed to setup after additional keys are added. |
|
74 | 68 | setup_args = dict( |
|
75 | 69 | name = name, |
|
76 | 70 | version = version, |
|
77 | 71 | description = description, |
|
78 | 72 | long_description = long_description, |
|
79 | 73 | author = author, |
|
80 | 74 | author_email = author_email, |
|
81 | 75 | url = url, |
|
82 | 76 | license = license, |
|
83 | 77 | platforms = platforms, |
|
84 | 78 | keywords = keywords, |
|
85 | 79 | classifiers = classifiers, |
|
86 | 80 | cmdclass = {'install_data': install_data_ext}, |
|
87 | 81 | ) |
|
88 | 82 | |
|
89 | 83 | |
|
90 | 84 | #--------------------------------------------------------------------------- |
|
91 | 85 | # Find packages |
|
92 | 86 | #--------------------------------------------------------------------------- |
|
93 | 87 | |
|
94 | 88 | def find_packages(): |
|
95 | 89 | """ |
|
96 | 90 | Find all of IPython's packages. |
|
97 | 91 | """ |
|
98 | 92 | excludes = ['deathrow', 'quarantine'] |
|
99 | 93 | packages = [] |
|
100 | 94 | for dir,subdirs,files in os.walk('IPython'): |
|
101 | 95 | package = dir.replace(os.path.sep, '.') |
|
102 | 96 | if any(package.startswith('IPython.'+exc) for exc in excludes): |
|
103 | 97 | # package is to be excluded (e.g. deathrow) |
|
104 | 98 | continue |
|
105 | 99 | if '__init__.py' not in files: |
|
106 | 100 | # not a package |
|
107 | 101 | continue |
|
108 | 102 | packages.append(package) |
|
109 | 103 | return packages |
|
110 | 104 | |
|
111 | 105 | #--------------------------------------------------------------------------- |
|
112 | 106 | # Find package data |
|
113 | 107 | #--------------------------------------------------------------------------- |
|
114 | 108 | |
|
115 | 109 | def find_package_data(): |
|
116 | 110 | """ |
|
117 | 111 | Find IPython's package_data. |
|
118 | 112 | """ |
|
119 | 113 | # This is not enough for these things to appear in an sdist. |
|
120 | 114 | # We need to muck with the MANIFEST to get this to work |
|
121 | 115 | |
|
122 | 116 | package_data = { |
|
123 | 117 | 'IPython.core' : ['profile/README*'], |
|
124 | 118 | 'IPython.core.tests' : ['*.png', '*.jpg', 'daft_extension/*.py'], |
|
125 | 119 | 'IPython.lib.tests' : ['*.wav'], |
|
126 | 120 | 'IPython.testing.plugin' : ['*.txt'], |
|
127 | 121 | } |
|
128 | 122 | |
|
129 | 123 | return package_data |
|
130 | 124 | |
|
131 | 125 | |
|
132 | 126 | def check_package_data(package_data): |
|
133 | 127 | """verify that package_data globs make sense""" |
|
134 | 128 | print("checking package data") |
|
135 | 129 | for pkg, data in package_data.items(): |
|
136 | 130 | pkg_root = pjoin(*pkg.split('.')) |
|
137 | 131 | for d in data: |
|
138 | 132 | path = pjoin(pkg_root, d) |
|
139 | 133 | if '*' in path: |
|
140 | 134 | assert len(glob(path)) > 0, "No files match pattern %s" % path |
|
141 | 135 | else: |
|
142 | 136 | assert os.path.exists(path), "Missing package data: %s" % path |
|
143 | 137 | |
|
144 | 138 | |
|
145 | 139 | def check_package_data_first(command): |
|
146 | 140 | """decorator for checking package_data before running a given command |
|
147 | 141 | |
|
148 | 142 | Probably only needs to wrap build_py |
|
149 | 143 | """ |
|
150 | 144 | class DecoratedCommand(command): |
|
151 | 145 | def run(self): |
|
152 | 146 | check_package_data(self.package_data) |
|
153 | 147 | command.run(self) |
|
154 | 148 | return DecoratedCommand |
|
155 | 149 | |
|
156 | 150 | |
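
A hedged sketch of how these helpers are expected to be combined: check_package_data_first() wraps a build command class, and git_prebuild() (defined further down in this file) supplies one, so setup.py can wire them into cmdclass roughly as below. The exact wiring lives in setup.py and may differ; this is illustration only.

    from distutils.command.build_py import build_py
    from setupbase import check_package_data_first, git_prebuild, setup_args

    # Validate package_data globs first, then build while recording the git commit.
    setup_args['cmdclass'].update({
        'build_py': check_package_data_first(git_prebuild('IPython', build_cmd=build_py)),
    })
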
|
157 | 151 | #--------------------------------------------------------------------------- |
|
158 | 152 | # Find data files |
|
159 | 153 | #--------------------------------------------------------------------------- |
|
160 | 154 | |
|
161 | 155 | def make_dir_struct(tag,base,out_base): |
|
162 | 156 | """Make the directory structure of all files below a starting dir. |
|
163 | 157 | |
|
164 | 158 | This is just a convenience routine to help build a nested directory |
|
165 | 159 | hierarchy because distutils is too stupid to do this by itself. |
|
166 | 160 | |
|
167 | 161 | XXX - this needs a proper docstring! |
|
168 | 162 | """ |
|
169 | 163 | |
|
170 | 164 | # we'll use these a lot below |
|
171 | 165 | lbase = len(base) |
|
172 | 166 | pathsep = os.path.sep |
|
173 | 167 | lpathsep = len(pathsep) |
|
174 | 168 | |
|
175 | 169 | out = [] |
|
176 | 170 | for (dirpath,dirnames,filenames) in os.walk(base): |
|
177 | 171 | # we need to strip out the dirpath from the base to map it to the |
|
178 | 172 | # output (installation) path. This requires possibly stripping the |
|
179 | 173 | # path separator, because otherwise pjoin will not work correctly |
|
180 | 174 | # (pjoin('foo/','/bar') returns '/bar'). |
|
181 | 175 | |
|
182 | 176 | dp_eff = dirpath[lbase:] |
|
183 | 177 | if dp_eff.startswith(pathsep): |
|
184 | 178 | dp_eff = dp_eff[lpathsep:] |
|
185 | 179 | # The output path must be anchored at the out_base marker |
|
186 | 180 | out_path = pjoin(out_base,dp_eff) |
|
187 | 181 | # Now we can generate the final filenames. Since os.walk only produces |
|
188 | 182 | # filenames, we must join back with the dirpath to get full valid file |
|
189 | 183 | # paths: |
|
190 | 184 | pfiles = [pjoin(dirpath,f) for f in filenames] |
|
191 | 185 | # Finally, generate the entry we need, which is a pair of (output |
|
192 | 186 | # path, files) for use as a data_files parameter in install_data. |
|
193 | 187 | out.append((out_path, pfiles)) |
|
194 | 188 | |
|
195 | 189 | return out |
|
196 | 190 | |
|
197 | 191 | |
|
198 | 192 | def find_data_files(): |
|
199 | 193 | """ |
|
200 | 194 | Find IPython's data_files. |
|
201 | 195 | |
|
202 | 196 | Just man pages at this point. |
|
203 | 197 | """ |
|
204 | 198 | |
|
205 | 199 | manpagebase = pjoin('share', 'man', 'man1') |
|
206 | 200 | |
|
207 | 201 | # Simple file lists can be made by hand |
|
208 | 202 | manpages = [f for f in glob(pjoin('docs','man','*.1.gz')) if isfile(f)] |
|
209 | 203 | if not manpages: |
|
210 | 204 | # When running from a source tree, the manpages aren't gzipped |
|
211 | 205 | manpages = [f for f in glob(pjoin('docs','man','*.1')) if isfile(f)] |
|
212 | 206 | |
|
213 | 207 | # And assemble the entire output list |
|
214 | 208 | data_files = [ (manpagebase, manpages) ] |
|
215 | 209 | |
|
216 | 210 | return data_files |
|
217 | 211 | |
|
218 | 212 | |
|
219 | 213 | def make_man_update_target(manpage): |
|
220 | 214 | """Return a target_update-compliant tuple for the given manpage. |
|
221 | 215 | |
|
222 | 216 | Parameters |
|
223 | 217 | ---------- |
|
224 | 218 | manpage : string |
|
225 | 219 | Name of the manpage, must include the section number (trailing number). |
|
226 | 220 | |
|
227 | 221 | Example |
|
228 | 222 | ------- |
|
229 | 223 | |
|
230 | 224 | >>> make_man_update_target('ipython.1') #doctest: +NORMALIZE_WHITESPACE |
|
231 | 225 | ('docs/man/ipython.1.gz', |
|
232 | 226 | ['docs/man/ipython.1'], |
|
233 | 227 | 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz') |
|
234 | 228 | """ |
|
235 | 229 | man_dir = pjoin('docs', 'man') |
|
236 | 230 | manpage_gz = manpage + '.gz' |
|
237 | 231 | manpath = pjoin(man_dir, manpage) |
|
238 | 232 | manpath_gz = pjoin(man_dir, manpage_gz) |
|
239 | 233 | gz_cmd = ( "cd %(man_dir)s && gzip -9c %(manpage)s > %(manpage_gz)s" % |
|
240 | 234 | locals() ) |
|
241 | 235 | return (manpath_gz, [manpath], gz_cmd) |
|
242 | 236 | |
|
243 | 237 | # The two functions below are copied from IPython.utils.path, so we don't need |
|
244 | 238 | # to import IPython during setup, which fails on Python 3. |
|
245 | 239 | |
|
246 | 240 | def target_outdated(target,deps): |
|
247 | 241 | """Determine whether a target is out of date. |
|
248 | 242 | |
|
249 | 243 | target_outdated(target,deps) -> 1/0 |
|
250 | 244 | |
|
251 | 245 | deps: list of filenames which MUST exist. |
|
252 | 246 | target: single filename which may or may not exist. |
|
253 | 247 | |
|
254 | 248 | If target doesn't exist or is older than any file listed in deps, return |
|
255 | 249 | true, otherwise return false. |
|
256 | 250 | """ |
|
257 | 251 | try: |
|
258 | 252 | target_time = os.path.getmtime(target) |
|
259 | 253 | except os.error: |
|
260 | 254 | return 1 |
|
261 | 255 | for dep in deps: |
|
262 | 256 | dep_time = os.path.getmtime(dep) |
|
263 | 257 | if dep_time > target_time: |
|
264 | 258 | #print "For target",target,"Dep failed:",dep # dbg |
|
265 | 259 | #print "times (dep,tar):",dep_time,target_time # dbg |
|
266 | 260 | return 1 |
|
267 | 261 | return 0 |
|
268 | 262 | |
|
269 | 263 | |
|
270 | 264 | def target_update(target,deps,cmd): |
|
271 | 265 | """Update a target with a given command given a list of dependencies. |
|
272 | 266 | |
|
273 | 267 | target_update(target,deps,cmd) -> runs cmd if target is outdated. |
|
274 | 268 | |
|
275 | 269 | This is just a wrapper around target_outdated() which calls the given |
|
276 | 270 | command if target is outdated.""" |
|
277 | 271 | |
|
278 | 272 | if target_outdated(target,deps): |
|
279 | 273 | os.system(cmd) |
|
280 | 274 | |
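
As a hedged illustration of how these two helpers combine with make_man_update_target() above: setup.py can regenerate the gzipped manpages only when the uncompressed pages are newer. The page list below is illustrative.

    from setupbase import make_man_update_target, target_update

    for page in ['ipython.1']:                        # illustrative list of manpages
        target, deps, cmd = make_man_update_target(page)
        target_update(target, deps, cmd)              # runs the gzip command only if outdated
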
|
281 | 275 | #--------------------------------------------------------------------------- |
|
282 | 276 | # Find scripts |
|
283 | 277 | #--------------------------------------------------------------------------- |
|
284 | 278 | |
|
285 | 279 | def find_entry_points(): |
|
286 | 280 | """Defines the command line entry points for IPython |
|
287 | 281 | |
|
288 | 282 | This always uses setuptools-style entry points. When setuptools is not in |
|
289 | 283 | use, our own build_scripts_entrypt class below parses these and builds |
|
290 | 284 | command line scripts. |
|
291 | 285 | |
|
292 | 286 | Each of our entry points gets both a plain name, e.g. ipython, and one |
|
293 | 287 | suffixed with the Python major version number, e.g. ipython3. |
|
294 | 288 | """ |
|
295 | 289 | ep = [ |
|
296 | 290 | 'ipython%s = IPython:start_ipython', |
|
297 | 291 | 'iptest%s = IPython.testing.iptestcontroller:main', |
|
298 | 292 | ] |
|
299 | 293 | suffix = str(sys.version_info[0]) |
|
300 | 294 | return [e % '' for e in ep] + [e % suffix for e in ep] |
|
301 | 295 | |
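
Evaluated under a Python 3 interpreter (so the version suffix is '3'), find_entry_points() returns the plain names followed by the suffixed ones:

    >>> find_entry_points()
    ['ipython = IPython:start_ipython',
     'iptest = IPython.testing.iptestcontroller:main',
     'ipython3 = IPython:start_ipython',
     'iptest3 = IPython.testing.iptestcontroller:main']
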
|
302 | 296 | script_src = """#!{executable} |
|
303 | 297 | # This script was automatically generated by setup.py |
|
304 | 298 | if __name__ == '__main__': |
|
305 | 299 | from {mod} import {func} |
|
306 | 300 | {func}() |
|
307 | 301 | """ |
|
308 | 302 | |
|
309 | 303 | class build_scripts_entrypt(build_scripts): |
|
310 | 304 | """Build the command line scripts |
|
311 | 305 | |
|
312 | 306 | Parse setuptools style entry points and write simple scripts to run the |
|
313 | 307 | target functions. |
|
314 | 308 | |
|
315 | 309 | On Windows, this also creates .cmd wrappers for the scripts so that you can |
|
316 | 310 | easily launch them from a command line. |
|
317 | 311 | """ |
|
318 | 312 | def run(self): |
|
319 | 313 | self.mkpath(self.build_dir) |
|
320 | 314 | outfiles = [] |
|
321 | 315 | for script in find_entry_points(): |
|
322 | 316 | name, entrypt = script.split('=') |
|
323 | 317 | name = name.strip() |
|
324 | 318 | entrypt = entrypt.strip() |
|
325 | 319 | outfile = os.path.join(self.build_dir, name) |
|
326 | 320 | outfiles.append(outfile) |
|
327 | 321 | print('Writing script to', outfile) |
|
328 | 322 | |
|
329 | 323 | mod, func = entrypt.split(':') |
|
330 | 324 | with open(outfile, 'w') as f: |
|
331 | 325 | f.write(script_src.format(executable=sys.executable, |
|
332 | 326 | mod=mod, func=func)) |
|
333 | 327 | |
|
334 | 328 | if sys.platform == 'win32': |
|
335 | 329 | # Write .cmd wrappers for Windows so 'ipython' etc. work at the |
|
336 | 330 | # command line |
|
337 | 331 | cmd_file = os.path.join(self.build_dir, name + '.cmd') |
|
338 | 332 | cmd = '@"{python}" "%~dp0\{script}" %*\r\n'.format( |
|
339 | 333 | python=sys.executable, script=name) |
|
340 | 334 | log.info("Writing %s wrapper script" % cmd_file) |
|
341 | 335 | with open(cmd_file, 'w') as f: |
|
342 | 336 | f.write(cmd) |
|
343 | 337 | |
|
344 | 338 | return outfiles, outfiles |
|
345 | 339 | |
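
To make the script_src template above concrete: for the 'ipython = IPython:start_ipython' entry point, the generated launcher is just a thin wrapper. The shebang shown here is an illustrative interpreter path; the real one is taken from sys.executable.

    #!/usr/bin/python3
    # This script was automatically generated by setup.py
    if __name__ == '__main__':
        from IPython import start_ipython
        start_ipython()
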
|
346 | 340 | class install_lib_symlink(Command): |
|
347 | 341 | user_options = [ |
|
348 | 342 | ('install-dir=', 'd', "directory to install to"), |
|
349 | 343 | ] |
|
350 | 344 | |
|
351 | 345 | def initialize_options(self): |
|
352 | 346 | self.install_dir = None |
|
353 | 347 | |
|
354 | 348 | def finalize_options(self): |
|
355 | 349 | self.set_undefined_options('symlink', |
|
356 | 350 | ('install_lib', 'install_dir'), |
|
357 | 351 | ) |
|
358 | 352 | |
|
359 | 353 | def run(self): |
|
360 | 354 | if sys.platform == 'win32': |
|
361 | 355 | raise Exception("This doesn't work on Windows.") |
|
362 | 356 | pkg = os.path.join(os.getcwd(), 'IPython') |
|
363 | 357 | dest = os.path.join(self.install_dir, 'IPython') |
|
364 | 358 | if os.path.islink(dest): |
|
365 | 359 | print('removing existing symlink at %s' % dest) |
|
366 | 360 | os.unlink(dest) |
|
367 | 361 | print('symlinking %s -> %s' % (pkg, dest)) |
|
368 | 362 | os.symlink(pkg, dest) |
|
369 | 363 | |
|
370 | 364 | class unsymlink(install): |
|
371 | 365 | def run(self): |
|
372 | 366 | dest = os.path.join(self.install_lib, 'IPython') |
|
373 | 367 | if os.path.islink(dest): |
|
374 | 368 | print('removing symlink at %s' % dest) |
|
375 | 369 | os.unlink(dest) |
|
376 | 370 | else: |
|
377 | 371 | print('No symlink exists at %s' % dest) |
|
378 | 372 | |
|
379 | 373 | class install_symlinked(install): |
|
380 | 374 | def run(self): |
|
381 | 375 | if sys.platform == 'win32': |
|
382 | 376 | raise Exception("This doesn't work on Windows.") |
|
383 | 377 | |
|
384 | 378 | # Run all sub-commands (at least those that need to be run) |
|
385 | 379 | for cmd_name in self.get_sub_commands(): |
|
386 | 380 | self.run_command(cmd_name) |
|
387 | 381 | |
|
388 | 382 | # 'sub_commands': a list of commands this command might have to run to |
|
389 | 383 | # get its work done. See cmd.py for more info. |
|
390 | 384 | sub_commands = [('install_lib_symlink', lambda self:True), |
|
391 | 385 | ('install_scripts_sym', lambda self:True), |
|
392 | 386 | ] |
|
393 | 387 | |
|
394 | 388 | class install_scripts_for_symlink(install_scripts): |
|
395 | 389 | """Redefined to get options from 'symlink' instead of 'install'. |
|
396 | 390 | |
|
397 | 391 | I love distutils almost as much as I love setuptools. |
|
398 | 392 | """ |
|
399 | 393 | def finalize_options(self): |
|
400 | 394 | self.set_undefined_options('build', ('build_scripts', 'build_dir')) |
|
401 | 395 | self.set_undefined_options('symlink', |
|
402 | 396 | ('install_scripts', 'install_dir'), |
|
403 | 397 | ('force', 'force'), |
|
404 | 398 | ('skip_build', 'skip_build'), |
|
405 | 399 | ) |
|
406 | 400 | |
|
407 | 401 | |
|
408 | 402 | #--------------------------------------------------------------------------- |
|
409 | 403 | # VCS related |
|
410 | 404 | #--------------------------------------------------------------------------- |
|
411 | 405 | |
|
412 | 406 | |
|
413 | 407 | def git_prebuild(pkg_dir, build_cmd=build_py): |
|
414 | 408 | """Return extended build or sdist command class for recording commit |
|
415 | 409 | |
|
416 | 410 | records git commit in IPython.utils._sysinfo.commit |
|
417 | 411 | |
|
418 | 412 | for use in IPython.utils.sysinfo.sys_info() calls after installation. |
|
419 | 413 | """ |
|
420 | 414 | |
|
421 | 415 | class MyBuildPy(build_cmd): |
|
422 | 416 | ''' Subclass to write commit data into installation tree ''' |
|
423 | 417 | def run(self): |
|
424 | 418 | # loose, as `.dev` is supposed to be invalid |
|
425 | 419 | print("check version number") |
|
426 | 420 | loose_pep440re = re.compile('^(\d+)\.(\d+)\.(\d+((a|b|rc)\d+)?)(\.post\d+)?(\.dev\d*)?$') |
|
427 | 421 | if not loose_pep440re.match(version): |
|
428 | 422 | raise ValueError("Version number '%s' is not valid (should match [N!]N(.N)*[{a|b|rc}N][.postN][.devN])" % version) |
|
429 | 423 | |
|
430 | 424 | |
|
431 | 425 | build_cmd.run(self) |
|
432 | 426 | # this one will only fire for build commands |
|
433 | 427 | if hasattr(self, 'build_lib'): |
|
434 | 428 | self._record_commit(self.build_lib) |
|
435 | 429 | |
|
436 | 430 | def make_release_tree(self, base_dir, files): |
|
437 | 431 | # this one will fire for sdist |
|
438 | 432 | build_cmd.make_release_tree(self, base_dir, files) |
|
439 | 433 | self._record_commit(base_dir) |
|
440 | 434 | |
|
441 | 435 | def _record_commit(self, base_dir): |
|
442 | 436 | import subprocess |
|
443 | 437 | proc = subprocess.Popen('git rev-parse --short HEAD', |
|
444 | 438 | stdout=subprocess.PIPE, |
|
445 | 439 | stderr=subprocess.PIPE, |
|
446 | 440 | shell=True) |
|
447 | 441 | repo_commit, _ = proc.communicate() |
|
448 | 442 | repo_commit = repo_commit.strip().decode("ascii") |
|
449 | 443 | |
|
450 | 444 | out_pth = pjoin(base_dir, pkg_dir, 'utils', '_sysinfo.py') |
|
451 | 445 | if os.path.isfile(out_pth) and not repo_commit: |
|
452 | 446 | # nothing to write, don't clobber |
|
453 | 447 | return |
|
454 | 448 | |
|
455 | 449 | print("writing git commit '%s' to %s" % (repo_commit, out_pth)) |
|
456 | 450 | |
|
457 | 451 | # remove to avoid overwriting original via hard link |
|
458 | 452 | try: |
|
459 | 453 | os.remove(out_pth) |
|
460 | 454 | except (IOError, OSError): |
|
461 | 455 | pass |
|
462 | 456 | with open(out_pth, 'w') as out_file: |
|
463 | 457 | out_file.writelines([ |
|
464 | 458 | '# GENERATED BY setup.py\n', |
|
465 | 459 | 'commit = u"%s"\n' % repo_commit, |
|
466 | 460 | ]) |
|
467 | 461 | return MyBuildPy |
|
468 | 462 |
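
The net effect of git_prebuild() is that a build or sdist tree ships an IPython/utils/_sysinfo.py containing nothing but the recorded commit, along these lines (the hash is illustrative):

    # GENERATED BY setup.py
    commit = u"0123abc"
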
@@ -1,28 +1,28 b'' | |||
|
1 | 1 | ; Tox (http://tox.testrun.org/) is a virtualenv manager for running tests in |
|
2 | 2 | ; multiple environments. This configuration file gets the requirements from |
|
3 | 3 | ; setup.py like a "pip install ipython[test]". To create the environments, it |
|
4 | 4 | ; requires every interpreter available/installed. |
|
5 | 5 | ; -- Commands -- |
|
6 | 6 | ; pip install tox # Installs tox |
|
7 | 7 | ; tox # Runs the tests (call from the directory with tox.ini) |
|
8 | 8 | ; tox -r # Ditto, but forcing the virtual environments to be rebuilt |
|
9 | 9 | ; tox -e py35,pypy # Runs only in the selected environments |
|
10 | 10 | ; tox -- --all -j # Runs "iptest --all -j" in every environment |
|
11 | 11 | |
|
12 | 12 | [tox] |
|
13 | envlist = py{36,35,34,33, |

13 | envlist = py{36,35,34,33,py} |
|
14 | 14 | skip_missing_interpreters = True |
|
15 | 15 | toxworkdir = /tmp/tox_ipython |
|
16 | 16 | |
|
17 | 17 | [testenv] |
|
18 | 18 | ; PyPy requires its Numpy fork instead of "pip install numpy" |
|
19 | 19 | ; Other IPython/testing dependencies should be in setup.py, not here |
|
20 | 20 | deps = |
|
21 | 21 | pypy: https://bitbucket.org/pypy/numpy/get/master.zip |
|
22 | py{36,35,34,33 |

22 | py{36,35,34,33}: matplotlib |
|
23 | 23 | .[test] |
|
24 | 24 | |
|
25 | 25 | ; It's just to avoid loading the IPython package in the current directory |
|
26 | 26 | changedir = {envtmpdir} |
|
27 | 27 | |
|
28 | 28 | commands = iptest {posargs} |