@@ -1,276 +1,276 b'' | |||
|
1 | 1 | """Magic functions for running cells in various scripts.""" |
|
2 | 2 | #----------------------------------------------------------------------------- |
|
3 | 3 | # Copyright (c) 2012 The IPython Development Team. |
|
4 | 4 | # |
|
5 | 5 | # Distributed under the terms of the Modified BSD License. |
|
6 | 6 | # |
|
7 | 7 | # The full license is in the file COPYING.txt, distributed with this software. |
|
8 | 8 | #----------------------------------------------------------------------------- |
|
9 | 9 | |
|
10 | 10 | #----------------------------------------------------------------------------- |
|
11 | 11 | # Imports |
|
12 | 12 | #----------------------------------------------------------------------------- |
|
13 | 13 | |
|
14 | 14 | # Stdlib |
|
15 | 15 | import os |
|
16 | 16 | import re |
|
17 | 17 | import sys |
|
18 | 18 | import signal |
|
19 | 19 | import time |
|
20 | 20 | from subprocess import Popen, PIPE |
|
21 | 21 | |
|
22 | 22 | # Our own packages |
|
23 | 23 | from IPython.config.configurable import Configurable |
|
24 | 24 | from IPython.core import magic_arguments |
|
25 | 25 | from IPython.core.error import UsageError |
|
26 | 26 | from IPython.core.magic import ( |
|
27 | 27 | Magics, magics_class, line_magic, cell_magic |
|
28 | 28 | ) |
|
29 | 29 | from IPython.lib.backgroundjobs import BackgroundJobManager |
|
30 | 30 | from IPython.testing.skipdoctest import skip_doctest |
|
31 | 31 | from IPython.utils import py3compat |
|
32 | 32 | from IPython.utils.process import find_cmd, FindCmdError, arg_split |
|
33 | 33 | from IPython.utils.traitlets import List, Dict |
|
34 | 34 | |
|
35 | 35 | #----------------------------------------------------------------------------- |
|
36 | 36 | # Magic implementation classes |
|
37 | 37 | #----------------------------------------------------------------------------- |
|
38 | 38 | |
|
39 | 39 | def script_args(f): |
|
40 | 40 | """single decorator for adding script args""" |
|
41 | 41 | args = [ |
|
42 | 42 | magic_arguments.argument( |
|
43 | 43 | '--out', type=str, |
|
44 | 44 | help="""The variable in which to store stdout from the script. |
|
45 | 45 | If the script is backgrounded, this will be the stdout *pipe*, |
|
46 | 46 | instead of the stdout text itself. |
|
47 | 47 | """ |
|
48 | 48 | ), |
|
49 | 49 | magic_arguments.argument( |
|
50 | 50 | '--err', type=str, |
|
51 | 51 | help="""The variable in which to store stderr from the script. |
|
52 | 52 | If the script is backgrounded, this will be the stderr *pipe*, |
|
53 | 53 | instead of the stderr text itself. |
|
54 | 54 | """ |
|
55 | 55 | ), |
|
56 | 56 | magic_arguments.argument( |
|
57 | 57 | '--bg', action="store_true", |
|
58 | 58 | help="""Whether to run the script in the background. |
|
59 | 59 | If given, the only way to see the output of the command is |
|
60 | 60 | with --out/err. |
|
61 | 61 | """ |
|
62 | 62 | ), |
|
63 | 63 | magic_arguments.argument( |
|
64 | 64 | '--proc', type=str, |
|
65 | 65 | help="""The variable in which to store the Popen instance. |
|
66 | 66 | This is used only when the --bg option is given. |
|
67 | 67 | """ |
|
68 | 68 | ), |
|
69 | 69 | ] |
|
70 | 70 | for arg in args: |
|
71 | 71 | f = arg(f) |
|
72 | 72 | return f |
|
73 | 73 | |
|
74 | 74 | @magics_class |
|
75 | 75 | class ScriptMagics(Magics, Configurable): |
|
76 | 76 | """Magics for talking to scripts |
|
77 | 77 | |
|
78 | 78 | This defines a base `%%script` cell magic for running a cell |
|
79 | 79 | with a program in a subprocess, and registers a few top-level |
|
80 | 80 | magics that call %%script with common interpreters. |
|
81 | 81 | """ |
|
82 | 82 | script_magics = List(config=True, |
|
83 | 83 | help="""Extra script cell magics to define |
|
84 | 84 | |
|
85 | 85 | This generates simple wrappers of `%%script foo` as `%%foo`. |
|
86 | 86 | |
|
87 | 87 | If you want to add script magics that aren't on your path, |
|
88 | 88 | specify them in script_paths |
|
89 | 89 | """, |
|
90 | 90 | ) |
|
91 | 91 | def _script_magics_default(self): |
|
92 | 92 | """default to a common list of programs if we find them""" |
|
93 | 93 | |
|
94 | 94 | defaults = [] |
|
95 | 95 | to_try = [] |
|
96 | 96 | if os.name == 'nt': |
|
97 | 97 | defaults.append('cmd') |
|
98 | 98 | to_try.append('powershell') |
|
99 | 99 | to_try.extend([ |
|
100 | 100 | 'sh', |
|
101 | 101 | 'bash', |
|
102 | 102 | 'perl', |
|
103 | 103 | 'ruby', |
|
104 | 104 | 'python3', |
|
105 | 105 | 'pypy', |
|
106 | 106 | ]) |
|
107 | 107 | |
|
108 | 108 | for cmd in to_try: |
|
109 | 109 | if cmd in self.script_paths: |
|
110 | 110 | defaults.append(cmd) |
|
111 | 111 | else: |
|
112 | 112 | try: |
|
113 | 113 | find_cmd(cmd) |
|
114 | 114 | except FindCmdError: |
|
115 | 115 | # command not found, ignore it |
|
116 | 116 | pass |
|
117 | 117 | except ImportError: |
|
118 | 118 | # Windows without pywin32, find_cmd doesn't work |
|
119 | 119 | pass |
|
120 | 120 | else: |
|
121 | 121 | defaults.append(cmd) |
|
122 | 122 | return defaults |
|
123 | 123 | |
|
124 | 124 | script_paths = Dict(config=True, |
|
125 | 125 | help="""Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby' |
|
126 | 126 | |
|
127 | 127 | Only necessary for items in script_magics where the default path will not |
|
128 | 128 | find the right interpreter. |
|
129 | 129 | """ |
|
130 | 130 | ) |
|
131 | 131 | |
|
132 | 132 | def __init__(self, shell=None): |
|
133 | 133 | Configurable.__init__(self, config=shell.config) |
|
134 | 134 | self._generate_script_magics() |
|
135 | 135 | Magics.__init__(self, shell=shell) |
|
136 | 136 | self.job_manager = BackgroundJobManager() |
|
137 | 137 | self.bg_processes = [] |
|
138 | 138 | |
|
139 | 139 | def __del__(self): |
|
140 | 140 | self.kill_bg_processes() |
|
141 | 141 | |
|
142 | 142 | def _generate_script_magics(self): |
|
143 | 143 | cell_magics = self.magics['cell'] |
|
144 | 144 | for name in self.script_magics: |
|
145 | 145 | cell_magics[name] = self._make_script_magic(name) |
|
146 | 146 | |
|
147 | 147 | def _make_script_magic(self, name): |
|
148 | 148 | """make a named magic, that calls %%script with a particular program""" |
|
149 | 149 | # expand to explicit path if necessary: |
|
150 | 150 | script = self.script_paths.get(name, name) |
|
151 | 151 | |
|
152 | 152 | @magic_arguments.magic_arguments() |
|
153 | 153 | @script_args |
|
154 | 154 | def named_script_magic(line, cell): |
|
155 | 155 | # if line, add it as cl-flags |
|
156 | 156 | if line: |
|
157 | 157 | line = "%s %s" % (script, line) |
|
158 | 158 | else: |
|
159 | 159 | line = script |
|
160 | 160 | return self.shebang(line, cell) |
|
161 | 161 | |
|
162 | 162 | # write a basic docstring: |
|
163 | 163 | named_script_magic.__doc__ = \ |
|
164 | 164 | """%%{name} script magic |
|
165 | 165 | |
|
166 | 166 | Run cells with {script} in a subprocess. |
|
167 | 167 | |
|
168 | 168 | This is a shortcut for `%%script {script}` |
|
169 | 169 | """.format(**locals()) |
|
170 | 170 | |
|
171 | 171 | return named_script_magic |
|
172 | 172 | |
|
173 | 173 | @magic_arguments.magic_arguments() |
|
174 | 174 | @script_args |
|
175 | 175 | @cell_magic("script") |
|
176 | 176 | def shebang(self, line, cell): |
|
177 | 177 | """Run a cell via a shell command |
|
178 | 178 | |
|
179 | 179 | The `%%script` line is like the #! line of a script, |
|
180 | 180 | specifying a program (bash, perl, ruby, etc.) with which to run. |
|
181 | 181 | |
|
182 | 182 | The rest of the cell is run by that program. |
|
183 | 183 | |
|
184 | 184 | Examples |
|
185 | 185 | -------- |
|
186 | 186 | :: |
|
187 | 187 | |
|
188 | 188 | In [1]: %%script bash |
|
189 | 189 | ...: for i in 1 2 3; do |
|
190 | 190 | ...: echo $i |
|
191 | 191 | ...: done |
|
192 | 192 | 1 |
|
193 | 193 | 2 |
|
194 | 194 | 3 |
|
195 | 195 | """ |
|
196 | 196 | argv = arg_split(line, posix = not sys.platform.startswith('win')) |
|
197 | 197 | args, cmd = self.shebang.parser.parse_known_args(argv) |
|
198 | 198 | |
|
199 | 199 | p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE) |
|
200 | 200 | |
|
201 | 201 | cell = cell.encode('utf8', 'replace') |
|
202 | 202 | if args.bg: |
|
203 | 203 | self.bg_processes.append(p) |
|
204 | 204 | if args.out: |
|
205 | 205 | self.shell.user_ns[args.out] = p.stdout |
|
206 | 206 | if args.err: |
|
207 | 207 | self.shell.user_ns[args.err] = p.stderr |
|
208 | self.job_manager.new(self._run_script, p, cell) | |
|
208 | self.job_manager.new(self._run_script, p, cell, daemon=True) | |
|
209 | 209 | if args.proc: |
|
210 | 210 | self.shell.user_ns[args.proc] = p |
|
211 | 211 | return |
|
212 | 212 | |
|
213 | 213 | try: |
|
214 | 214 | out, err = p.communicate(cell) |
|
215 | 215 | except KeyboardInterrupt: |
|
216 | 216 | try: |
|
217 | 217 | p.send_signal(signal.SIGINT) |
|
218 | 218 | time.sleep(0.1) |
|
219 | 219 | if p.poll() is not None: |
|
220 | 220 | print "Process is interrupted." |
|
221 | 221 | return |
|
222 | 222 | p.terminate() |
|
223 | 223 | time.sleep(0.1) |
|
224 | 224 | if p.poll() is not None: |
|
225 | 225 | print "Process is terminated." |
|
226 | 226 | return |
|
227 | 227 | p.kill() |
|
228 | 228 | print "Process is killed." |
|
229 | 229 | except OSError: |
|
230 | 230 | pass |
|
231 | 231 | except Exception as e: |
|
232 | 232 | print "Error while terminating subprocess (pid=%i): %s" \ |
|
233 | 233 | % (p.pid, e) |
|
234 | 234 | return |
|
235 | 235 | out = py3compat.bytes_to_str(out) |
|
236 | 236 | err = py3compat.bytes_to_str(err) |
|
237 | 237 | if args.out: |
|
238 | 238 | self.shell.user_ns[args.out] = out |
|
239 | 239 | else: |
|
240 | 240 | sys.stdout.write(out) |
|
241 | 241 | sys.stdout.flush() |
|
242 | 242 | if args.err: |
|
243 | 243 | self.shell.user_ns[args.err] = err |
|
244 | 244 | else: |
|
245 | 245 | sys.stderr.write(err) |
|
246 | 246 | sys.stderr.flush() |
|
247 | 247 | |
|
248 | 248 | def _run_script(self, p, cell): |
|
249 | 249 | """callback for running the script in the background""" |
|
250 | 250 | p.stdin.write(cell) |
|
251 | 251 | p.stdin.close() |
|
252 | 252 | p.wait() |
|
253 | 253 | |
|
254 | 254 | @line_magic("killbgscripts") |
|
255 | 255 | def kill_bg_processes(self, dummy=None): |
|
256 | 256 | """Kill all BG processes which are still running.""" |
|
257 | 257 | for p in self.bg_processes: |
|
258 | 258 | if p.poll() is None: |
|
259 | 259 | try: |
|
260 | 260 | p.send_signal(signal.SIGINT) |
|
261 | 261 | except: |
|
262 | 262 | pass |
|
263 | 263 | time.sleep(0.1) |
|
264 | 264 | for p in self.bg_processes: |
|
265 | 265 | if p.poll() is None: |
|
266 | 266 | try: |
|
267 | 267 | p.terminate() |
|
268 | 268 | except: |
|
269 | 269 | pass |
|
270 | 270 | time.sleep(0.1) |
|
271 | 271 | for p in self.bg_processes: |
|
272 | 272 | if p.poll() is None: |
|
273 | 273 | try: |
|
274 | 274 | p.kill() |
|
275 | 275 | except: |
|
276 | 276 | pass |
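
The only functional change in this first file is the line-208 hunk above: when a cell is run with the `--bg` flag, the job that feeds the cell to the subprocess is now started with `daemon=True`. The following is a minimal sketch of that code path outside the magic, using only names that appear in this changeset; the choice of `bash` and the cell contents are illustrative assumptions, and it relies on the patched `BackgroundJobManager` from the second file below.

    from subprocess import PIPE, Popen

    from IPython.lib.backgroundjobs import BackgroundJobManager

    def _run_script(p, cell):
        # Same body as ScriptMagics._run_script above: feed the cell text to
        # the subprocess on stdin, then wait for the process to finish.
        p.stdin.write(cell)
        p.stdin.close()
        p.wait()

    job_manager = BackgroundJobManager()
    p = Popen(['bash'], stdin=PIPE, stdout=PIPE, stderr=PIPE)  # assumes bash is on PATH
    cell = 'echo hello\n'.encode('utf8', 'replace')
    # The new daemon=True keyword marks the worker thread as a daemon, so an
    # unfinished background script no longer keeps the interpreter alive.
    job_manager.new(_run_script, p, cell, daemon=True)
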
@@ -1,480 +1,484 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """Manage background (threaded) jobs conveniently from an interactive shell. |
|
3 | 3 | |
|
4 | 4 | This module provides a BackgroundJobManager class. This is the main class |
|
5 | 5 | meant for public usage; it implements an object which can create and manage |
|
6 | 6 | new background jobs. |
|
7 | 7 | |
|
8 | 8 | It also provides the actual job classes managed by these BackgroundJobManager |
|
9 | 9 | objects, see their docstrings below. |
|
10 | 10 | |
|
11 | 11 | |
|
12 | 12 | This system was inspired by discussions with B. Granger and the |
|
13 | 13 | BackgroundCommand class described in the book Python Scripting for |
|
14 | 14 | Computational Science, by H. P. Langtangen: |
|
15 | 15 | |
|
16 | 16 | http://folk.uio.no/hpl/scripting |
|
17 | 17 | |
|
18 | 18 | (although ultimately no code from this text was used, as IPython's system is a |
|
19 | 19 | separate implementation). |
|
20 | 20 | |
|
21 | 21 | An example notebook is provided in our documentation illustrating interactive |
|
22 | 22 | use of the system. |
|
23 | 23 | """ |
|
24 | 24 | |
|
25 | 25 | #***************************************************************************** |
|
26 | 26 | # Copyright (C) 2005-2006 Fernando Perez <fperez@colorado.edu> |
|
27 | 27 | # |
|
28 | 28 | # Distributed under the terms of the BSD License. The full license is in |
|
29 | 29 | # the file COPYING, distributed as part of this software. |
|
30 | 30 | #***************************************************************************** |
|
31 | 31 | |
|
32 | 32 | # Code begins |
|
33 | 33 | import sys |
|
34 | 34 | import threading |
|
35 | 35 | |
|
36 | 36 | from IPython.core.ultratb import AutoFormattedTB |
|
37 | 37 | from IPython.utils.warn import warn, error |
|
38 | 38 | |
|
39 | 39 | |
|
40 | 40 | class BackgroundJobManager(object): |
|
41 | 41 | """Class to manage a pool of backgrounded threaded jobs. |
|
42 | 42 | |
|
43 | 43 | Below, we assume that 'jobs' is a BackgroundJobManager instance. |
|
44 | 44 | |
|
45 | 45 | Usage summary (see the method docstrings for details): |
|
46 | 46 | |
|
47 | 47 | jobs.new(...) -> start a new job |
|
48 | 48 | |
|
49 | 49 | jobs() or jobs.status() -> print status summary of all jobs |
|
50 | 50 | |
|
51 | 51 | jobs[N] -> returns job number N. |
|
52 | 52 | |
|
53 | 53 | foo = jobs[N].result -> assign to variable foo the result of job N |
|
54 | 54 | |
|
55 | 55 | jobs[N].traceback() -> print the traceback of dead job N |
|
56 | 56 | |
|
57 | 57 | jobs.remove(N) -> remove (finished) job N |
|
58 | 58 | |
|
59 | 59 | jobs.flush() -> remove all finished jobs |
|
60 | 60 | |
|
61 | 61 | As a convenience feature, BackgroundJobManager instances provide the |
|
62 | 62 | utility result and traceback methods which retrieve the corresponding |
|
63 | 63 | information from the jobs list: |
|
64 | 64 | |
|
65 | 65 | jobs.result(N) <--> jobs[N].result |
|
66 | 66 | jobs.traceback(N) <--> jobs[N].traceback() |
|
67 | 67 | |
|
68 | 68 | While this appears minor, it allows you to use tab completion |
|
69 | 69 | interactively on the job manager instance. |
|
70 | 70 | """ |
|
71 | 71 | |
|
72 | 72 | def __init__(self): |
|
73 | 73 | # Lists for job management, accessed via a property to ensure they're |
|
74 | 74 | # up to date. |
|
75 | 75 | self._running = [] |
|
76 | 76 | self._completed = [] |
|
77 | 77 | self._dead = [] |
|
78 | 78 | # A dict of all jobs, so users can easily access any of them |
|
79 | 79 | self.all = {} |
|
80 | 80 | # For reporting |
|
81 | 81 | self._comp_report = [] |
|
82 | 82 | self._dead_report = [] |
|
83 | 83 | # Store status codes locally for fast lookups |
|
84 | 84 | self._s_created = BackgroundJobBase.stat_created_c |
|
85 | 85 | self._s_running = BackgroundJobBase.stat_running_c |
|
86 | 86 | self._s_completed = BackgroundJobBase.stat_completed_c |
|
87 | 87 | self._s_dead = BackgroundJobBase.stat_dead_c |
|
88 | 88 | |
|
89 | 89 | @property |
|
90 | 90 | def running(self): |
|
91 | 91 | self._update_status() |
|
92 | 92 | return self._running |
|
93 | 93 | |
|
94 | 94 | @property |
|
95 | 95 | def dead(self): |
|
96 | 96 | self._update_status() |
|
97 | 97 | return self._dead |
|
98 | 98 | |
|
99 | 99 | @property |
|
100 | 100 | def completed(self): |
|
101 | 101 | self._update_status() |
|
102 | 102 | return self._completed |
|
103 | 103 | |
|
104 | 104 | def new(self, func_or_exp, *args, **kwargs): |
|
105 | 105 | """Add a new background job and start it in a separate thread. |
|
106 | 106 | |
|
107 | 107 | There are two types of jobs which can be created: |
|
108 | 108 | |
|
109 | 109 | 1. Jobs based on expressions which can be passed to an eval() call. |
|
110 | 110 | The expression must be given as a string. For example: |
|
111 | 111 | |
|
112 | 112 | job_manager.new('myfunc(x,y,z=1)'[,glob[,loc]]) |
|
113 | 113 | |
|
114 | 114 | The given expression is passed to eval(), along with the optional |
|
115 | 115 | global/local dicts provided. If no dicts are given, they are |
|
116 | 116 | extracted automatically from the caller's frame. |
|
117 | 117 | |
|
118 | 118 | A Python statement is NOT a valid eval() expression. Basically, you |
|
119 | 119 | can only use as an eval() argument something which can go on the right |
|
120 | 120 | of an '=' sign and be assigned to a variable. |
|
121 | 121 | |
|
122 | 122 | For example, "print 'hello'" is not valid, but '2+3' is. |
|
123 | 123 | |
|
124 | 124 | 2. Jobs given a function object, optionally passing additional |
|
125 | 125 | positional arguments: |
|
126 | 126 | |
|
127 | 127 | job_manager.new(myfunc, x, y) |
|
128 | 128 | |
|
129 | 129 | The function is called with the given arguments. |
|
130 | 130 | |
|
131 | 131 | If you need to pass keyword arguments to your function, you must |
|
132 | 132 | supply them as a dict named kw: |
|
133 | 133 | |
|
134 | 134 | job_manager.new(myfunc, x, y, kw=dict(z=1)) |
|
135 | 135 | |
|
136 | 136 | The reason for this asymmetry is that the new() method needs to |
|
137 | 137 | maintain access to its own keywords, and this prevents name collisions |
|
138 | 138 | between arguments to new() and arguments to your own functions. |
|
139 | 139 | |
|
140 | 140 | In both cases, the result is stored in the job.result field of the |
|
141 | 141 | background job object. |
|
142 | 142 | |
|
143 | You can set the `daemon` attribute of the thread by giving the keyword | 
|
144 | argument `daemon`. | |
|
143 | 145 | |
|
144 | 146 | Notes and caveats: |
|
145 | 147 | |
|
146 | 148 | 1. All threads running share the same standard output. Thus, if your |
|
147 | 149 | background jobs generate output, it will come out on top of whatever |
|
148 | 150 | you are currently writing. For this reason, background jobs are best |
|
149 | 151 | used with silent functions which simply return their output. |
|
150 | 152 | |
|
151 | 153 | 2. Threads also all work within the same global namespace, and this |
|
152 | 154 | system does not lock interactive variables. So if you send a job to the |
|
153 | 155 | background which operates on a mutable object for a long time, and |
|
154 | 156 | start modifying that same mutable object interactively (or in another |
|
155 | 157 | backgrounded job), all sorts of bizarre behaviour will occur. |
|
156 | 158 | |
|
157 | 159 | 3. If a background job is spending a lot of time inside a C extension |
|
158 | 160 | module which does not release the Python Global Interpreter Lock |
|
159 | 161 | (GIL), this will block the IPython prompt. This is simply because the |
|
160 | 162 | Python interpreter can only switch between threads at Python |
|
161 | 163 | bytecodes. While the execution is inside C code, the interpreter must |
|
162 | 164 | simply wait unless the extension module releases the GIL. |
|
163 | 165 | |
|
164 | 166 | 4. There is no way, due to limitations in the Python threads library, |
|
165 | 167 | to kill a thread once it has started.""" |
|
166 | 168 | |
|
167 | 169 | if callable(func_or_exp): |
|
168 | 170 | kw = kwargs.get('kw',{}) |
|
169 | 171 | job = BackgroundJobFunc(func_or_exp,*args,**kw) |
|
170 | 172 | elif isinstance(func_or_exp, basestring): |
|
171 | 173 | if not args: |
|
172 | 174 | frame = sys._getframe(1) |
|
173 | 175 | glob, loc = frame.f_globals, frame.f_locals |
|
174 | 176 | elif len(args)==1: |
|
175 | 177 | glob = loc = args[0] |
|
176 | 178 | elif len(args)==2: |
|
177 | 179 | glob,loc = args |
|
178 | 180 | else: |
|
179 | 181 | raise ValueError( |
|
180 | 182 | 'Expression jobs take at most 2 args (globals,locals)') |
|
181 | 183 | job = BackgroundJobExpr(func_or_exp, glob, loc) |
|
182 | 184 | else: |
|
183 | 185 | raise TypeError('invalid args for new job') |
|
184 | ||
|
186 | ||
|
187 | if kwargs.get('daemon', False): | |
|
188 | job.daemon = True | |
|
185 | 189 | job.num = len(self.all)+1 if self.all else 0 |
|
186 | 190 | self.running.append(job) |
|
187 | 191 | self.all[job.num] = job |
|
188 | 192 | print 'Starting job # %s in a separate thread.' % job.num |
|
189 | 193 | job.start() |
|
190 | 194 | return job |
|
191 | 195 | |
|
192 | 196 | def __getitem__(self, job_key): |
|
193 | 197 | num = job_key if isinstance(job_key, int) else job_key.num |
|
194 | 198 | return self.all[num] |
|
195 | 199 | |
|
196 | 200 | def __call__(self): |
|
197 | 201 | """An alias to self.status(), |
|
198 | 202 | |
|
199 | 203 | This allows you to simply call a job manager instance much like the |
|
200 | 204 | Unix `jobs` shell command.""" |
|
201 | 205 | |
|
202 | 206 | return self.status() |
|
203 | 207 | |
|
204 | 208 | def _update_status(self): |
|
205 | 209 | """Update the status of the job lists. |
|
206 | 210 | |
|
207 | 211 | This method moves finished jobs to one of two lists: |
|
208 | 212 | - self.completed: jobs which completed successfully |
|
209 | 213 | - self.dead: jobs which finished but died. |
|
210 | 214 | |
|
211 | 215 | It also copies those jobs to corresponding _report lists. These lists |
|
212 | 216 | are used to report jobs completed/dead since the last update, and are |
|
213 | 217 | then cleared by the reporting function after each call.""" |
|
214 | 218 | |
|
215 | 219 | # Status codes |
|
216 | 220 | srun, scomp, sdead = self._s_running, self._s_completed, self._s_dead |
|
217 | 221 | # State lists, use the actual lists b/c the public names are properties |
|
218 | 222 | # that call this very function on access |
|
219 | 223 | running, completed, dead = self._running, self._completed, self._dead |
|
220 | 224 | |
|
221 | 225 | # Now, update all state lists |
|
222 | 226 | for num, job in enumerate(running): |
|
223 | 227 | stat = job.stat_code |
|
224 | 228 | if stat == srun: |
|
225 | 229 | continue |
|
226 | 230 | elif stat == scomp: |
|
227 | 231 | completed.append(job) |
|
228 | 232 | self._comp_report.append(job) |
|
229 | 233 | running[num] = False |
|
230 | 234 | elif stat == sdead: |
|
231 | 235 | dead.append(job) |
|
232 | 236 | self._dead_report.append(job) |
|
233 | 237 | running[num] = False |
|
234 | 238 | # Remove dead/completed jobs from running list |
|
235 | 239 | running[:] = filter(None, running) |
|
236 | 240 | |
|
237 | 241 | def _group_report(self,group,name): |
|
238 | 242 | """Report summary for a given job group. |
|
239 | 243 | |
|
240 | 244 | Return True if the group had any elements.""" |
|
241 | 245 | |
|
242 | 246 | if group: |
|
243 | 247 | print '%s jobs:' % name |
|
244 | 248 | for job in group: |
|
245 | 249 | print '%s : %s' % (job.num,job) |
|
246 | 250 | |
|
247 | 251 | return True |
|
248 | 252 | |
|
249 | 253 | def _group_flush(self,group,name): |
|
250 | 254 | """Flush a given job group |
|
251 | 255 | |
|
252 | 256 | Return True if the group had any elements.""" |
|
253 | 257 | |
|
254 | 258 | njobs = len(group) |
|
255 | 259 | if njobs: |
|
256 | 260 | plural = {1:''}.setdefault(njobs,'s') |
|
257 | 261 | print 'Flushing %s %s job%s.' % (njobs,name,plural) |
|
258 | 262 | group[:] = [] |
|
259 | 263 | return True |
|
260 | 264 | |
|
261 | 265 | def _status_new(self): |
|
262 | 266 | """Print the status of newly finished jobs. |
|
263 | 267 | |
|
264 | 268 | Return True if any new jobs are reported. |
|
265 | 269 | |
|
266 | 270 | This call resets its own state every time, so it only reports jobs |
|
267 | 271 | which have finished since the last time it was called.""" |
|
268 | 272 | |
|
269 | 273 | self._update_status() |
|
270 | 274 | new_comp = self._group_report(self._comp_report, 'Completed') |
|
271 | 275 | new_dead = self._group_report(self._dead_report, |
|
272 | 276 | 'Dead, call jobs.traceback() for details') |
|
273 | 277 | self._comp_report[:] = [] |
|
274 | 278 | self._dead_report[:] = [] |
|
275 | 279 | return new_comp or new_dead |
|
276 | 280 | |
|
277 | 281 | def status(self,verbose=0): |
|
278 | 282 | """Print a status of all jobs currently being managed.""" |
|
279 | 283 | |
|
280 | 284 | self._update_status() |
|
281 | 285 | self._group_report(self.running,'Running') |
|
282 | 286 | self._group_report(self.completed,'Completed') |
|
283 | 287 | self._group_report(self.dead,'Dead') |
|
284 | 288 | # Also flush the report queues |
|
285 | 289 | self._comp_report[:] = [] |
|
286 | 290 | self._dead_report[:] = [] |
|
287 | 291 | |
|
288 | 292 | def remove(self,num): |
|
289 | 293 | """Remove a finished (completed or dead) job.""" |
|
290 | 294 | |
|
291 | 295 | try: |
|
292 | 296 | job = self.all[num] |
|
293 | 297 | except KeyError: |
|
294 | 298 | error('Job #%s not found' % num) |
|
295 | 299 | else: |
|
296 | 300 | stat_code = job.stat_code |
|
297 | 301 | if stat_code == self._s_running: |
|
298 | 302 | error('Job #%s is still running, it can not be removed.' % num) |
|
299 | 303 | return |
|
300 | 304 | elif stat_code == self._s_completed: |
|
301 | 305 | self.completed.remove(job) |
|
302 | 306 | elif stat_code == self._s_dead: |
|
303 | 307 | self.dead.remove(job) |
|
304 | 308 | |
|
305 | 309 | def flush(self): |
|
306 | 310 | """Flush all finished jobs (completed and dead) from lists. |
|
307 | 311 | |
|
308 | 312 | Running jobs are never flushed. |
|
309 | 313 | |
|
310 | 314 | It first calls _status_new(), to update info. If any jobs have |
|
311 | 315 | completed since the last _status_new() call, the flush operation |
|
312 | 316 | aborts.""" |
|
313 | 317 | |
|
314 | 318 | # Remove the finished jobs from the master dict |
|
315 | 319 | alljobs = self.all |
|
316 | 320 | for job in self.completed+self.dead: |
|
317 | 321 | del(alljobs[job.num]) |
|
318 | 322 | |
|
319 | 323 | # Now flush these lists completely |
|
320 | 324 | fl_comp = self._group_flush(self.completed, 'Completed') |
|
321 | 325 | fl_dead = self._group_flush(self.dead, 'Dead') |
|
322 | 326 | if not (fl_comp or fl_dead): |
|
323 | 327 | print 'No jobs to flush.' |
|
324 | 328 | |
|
325 | 329 | def result(self,num): |
|
326 | 330 | """result(N) -> return the result of job N.""" |
|
327 | 331 | try: |
|
328 | 332 | return self.all[num].result |
|
329 | 333 | except KeyError: |
|
330 | 334 | error('Job #%s not found' % num) |
|
331 | 335 | |
|
332 | 336 | def _traceback(self, job): |
|
333 | 337 | num = job if isinstance(job, int) else job.num |
|
334 | 338 | try: |
|
335 | 339 | self.all[num].traceback() |
|
336 | 340 | except KeyError: |
|
337 | 341 | error('Job #%s not found' % num) |
|
338 | 342 | |
|
339 | 343 | def traceback(self, job=None): |
|
340 | 344 | if job is None: |
|
341 | 345 | self._update_status() |
|
342 | 346 | for deadjob in self.dead: |
|
343 | 347 | print "Traceback for: %r" % deadjob |
|
344 | 348 | self._traceback(deadjob) |
|
345 | 349 | |
|
346 | 350 | else: |
|
347 | 351 | self._traceback(job) |
|
348 | 352 | |
|
349 | 353 | |
|
350 | 354 | class BackgroundJobBase(threading.Thread): |
|
351 | 355 | """Base class to build BackgroundJob classes. |
|
352 | 356 | |
|
353 | 357 | The derived classes must implement: |
|
354 | 358 | |
|
355 | 359 | - Their own __init__, since the one here raises NotImplementedError. The |
|
356 | 360 | derived constructor must call self._init() at the end, to provide common |
|
357 | 361 | initialization. |
|
358 | 362 | |
|
359 | 363 | - A strform attribute used in calls to __str__. |
|
360 | 364 | |
|
361 | 365 | - A call() method, which will make the actual execution call and must |
|
362 | 366 | return a value to be held in the 'result' field of the job object.""" |
|
363 | 367 | |
|
364 | 368 | # Class constants for status, in string and as numerical codes (when |
|
365 | 369 | # updating jobs lists, we don't want to do string comparisons). This will |
|
366 | 370 | # be done at every user prompt, so it has to be as fast as possible |
|
367 | 371 | stat_created = 'Created'; stat_created_c = 0 |
|
368 | 372 | stat_running = 'Running'; stat_running_c = 1 |
|
369 | 373 | stat_completed = 'Completed'; stat_completed_c = 2 |
|
370 | 374 | stat_dead = 'Dead (Exception), call jobs.traceback() for details' |
|
371 | 375 | stat_dead_c = -1 |
|
372 | 376 | |
|
373 | 377 | def __init__(self): |
|
374 | 378 | raise NotImplementedError, \ |
|
375 | 379 | "This class can not be instantiated directly." |
|
376 | 380 | |
|
377 | 381 | def _init(self): |
|
378 | 382 | """Common initialization for all BackgroundJob objects""" |
|
379 | 383 | |
|
380 | 384 | for attr in ['call','strform']: |
|
381 | 385 | assert hasattr(self,attr), "Missing attribute <%s>" % attr |
|
382 | 386 | |
|
383 | 387 | # The num tag can be set by an external job manager |
|
384 | 388 | self.num = None |
|
385 | 389 | |
|
386 | 390 | self.status = BackgroundJobBase.stat_created |
|
387 | 391 | self.stat_code = BackgroundJobBase.stat_created_c |
|
388 | 392 | self.finished = False |
|
389 | 393 | self.result = '<BackgroundJob has not completed>' |
|
390 | 394 | |
|
391 | 395 | # reuse the ipython traceback handler if we can get to it, otherwise |
|
392 | 396 | # make a new one |
|
393 | 397 | try: |
|
394 | 398 | make_tb = get_ipython().InteractiveTB.text |
|
395 | 399 | except: |
|
396 | 400 | make_tb = AutoFormattedTB(mode = 'Context', |
|
397 | 401 | color_scheme='NoColor', |
|
398 | 402 | tb_offset = 1).text |
|
399 | 403 | # Note that the actual API for text() requires the three args to be |
|
400 | 404 | # passed in, so we wrap it in a simple lambda. |
|
401 | 405 | self._make_tb = lambda : make_tb(None, None, None) |
|
402 | 406 | |
|
403 | 407 | # Hold a formatted traceback if one is generated. |
|
404 | 408 | self._tb = None |
|
405 | 409 | |
|
406 | 410 | threading.Thread.__init__(self) |
|
407 | 411 | |
|
408 | 412 | def __str__(self): |
|
409 | 413 | return self.strform |
|
410 | 414 | |
|
411 | 415 | def __repr__(self): |
|
412 | 416 | return '<BackgroundJob #%d: %s>' % (self.num, self.strform) |
|
413 | 417 | |
|
414 | 418 | def traceback(self): |
|
415 | 419 | print self._tb |
|
416 | 420 | |
|
417 | 421 | def run(self): |
|
418 | 422 | try: |
|
419 | 423 | self.status = BackgroundJobBase.stat_running |
|
420 | 424 | self.stat_code = BackgroundJobBase.stat_running_c |
|
421 | 425 | self.result = self.call() |
|
422 | 426 | except: |
|
423 | 427 | self.status = BackgroundJobBase.stat_dead |
|
424 | 428 | self.stat_code = BackgroundJobBase.stat_dead_c |
|
425 | 429 | self.finished = None |
|
426 | 430 | self.result = ('<BackgroundJob died, call jobs.traceback() for details>') |
|
427 | 431 | self._tb = self._make_tb() |
|
428 | 432 | else: |
|
429 | 433 | self.status = BackgroundJobBase.stat_completed |
|
430 | 434 | self.stat_code = BackgroundJobBase.stat_completed_c |
|
431 | 435 | self.finished = True |
|
432 | 436 | |
|
433 | 437 | |
|
434 | 438 | class BackgroundJobExpr(BackgroundJobBase): |
|
435 | 439 | """Evaluate an expression as a background job (uses a separate thread).""" |
|
436 | 440 | |
|
437 | 441 | def __init__(self, expression, glob=None, loc=None): |
|
438 | 442 | """Create a new job from a string which can be fed to eval(). |
|
439 | 443 | |
|
440 | 444 | global/locals dicts can be provided, which will be passed to the eval |
|
441 | 445 | call.""" |
|
442 | 446 | |
|
443 | 447 | # fail immediately if the given expression can't be compiled |
|
444 | 448 | self.code = compile(expression,'<BackgroundJob compilation>','eval') |
|
445 | 449 | |
|
446 | 450 | glob = {} if glob is None else glob |
|
447 | 451 | loc = {} if loc is None else loc |
|
448 | 452 | self.expression = self.strform = expression |
|
449 | 453 | self.glob = glob |
|
450 | 454 | self.loc = loc |
|
451 | 455 | self._init() |
|
452 | 456 | |
|
453 | 457 | def call(self): |
|
454 | 458 | return eval(self.code,self.glob,self.loc) |
|
455 | 459 | |
|
456 | 460 | |
|
457 | 461 | class BackgroundJobFunc(BackgroundJobBase): |
|
458 | 462 | """Run a function call as a background job (uses a separate thread).""" |
|
459 | 463 | |
|
460 | 464 | def __init__(self, func, *args, **kwargs): |
|
461 | 465 | """Create a new job from a callable object. |
|
462 | 466 | |
|
463 | 467 | Any positional arguments and keyword args given to this constructor |
|
464 | 468 | after the initial callable are passed directly to it.""" |
|
465 | 469 | |
|
466 | 470 | if not callable(func): |
|
467 | 471 | raise TypeError( |
|
468 | 472 | 'first argument to BackgroundJobFunc must be callable') |
|
469 | 473 | |
|
470 | 474 | self.func = func |
|
471 | 475 | self.args = args |
|
472 | 476 | self.kwargs = kwargs |
|
473 | 477 | # The string form will only include the function passed, because |
|
474 | 478 | # generating string representations of the arguments is a potentially |
|
475 | 479 | # _very_ expensive operation (e.g. with large arrays). |
|
476 | 480 | self.strform = str(func) |
|
477 | 481 | self._init() |
|
478 | 482 | |
|
479 | 483 | def call(self): |
|
480 | 484 | return self.func(*self.args, **self.kwargs) |
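
To round off the second file, here is a short usage sketch of the new `daemon` keyword on `BackgroundJobManager.new()`; `wait_and_add` is a made-up example function, and everything else follows the docstrings above.

    import time

    from IPython.lib.backgroundjobs import BackgroundJobManager

    def wait_and_add(x, y):
        # A silent function, as the "Notes and caveats" section recommends.
        time.sleep(5)
        return x + y

    jobs = BackgroundJobManager()

    # Ordinary background job: a live non-daemon thread keeps the interpreter
    # from exiting until the job has finished.
    job = jobs.new(wait_and_add, 2, 3)

    # With this change, daemon=True sets job.daemon before the thread starts,
    # so the interpreter can exit without waiting for the job.
    jobs.new(wait_and_add, 4, 5, daemon=True)

    jobs.status()       # summary of running/completed/dead jobs
    job.join()          # jobs are threading.Thread subclasses, so join() works
    print(job.result)   # 5, once the first job has completed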