plotting_frontend.py

"""An example of how to use IPython.parallel for plotting remote parallel data

The two files plotting_frontend.py and plotting_backend.py go together.

To run this example, first start the IPython controller and 4
engines::

    ipcluster start -n 4

Then start ipython in pylab mode::

    ipython --pylab

Then a simple "run plotting_frontend.py" in IPython will run the
example. When this is done, all the variables (such as number, downx, etc.)
are available in IPython, so for example you can make additional plots.
"""
from __future__ import print_function

import numpy as N
from pylab import *
from IPython.parallel import Client

# Connect to the cluster
rc = Client()
view = rc[:]

# Run the simulation on all the engines
view.run('plotting_backend.py')

# Bring back the data. These are all AsyncResult objects.
# pull returns one value per engine; gather concatenates the
# partitioned sequences from all engines into a single list.
number = view.pull('number')
d_number = view.pull('d_number')
downx = view.gather('downx')
downy = view.gather('downy')
downpx = view.gather('downpx')
downpy = view.gather('downpy')

# but we can still iterate through AsyncResults before they are done
print("number: ", sum(number))
print("downsampled number: ", sum(d_number))


# Make a scatter plot of the gathered data
# These calls to matplotlib could be replaced by calls to pygist or
# another plotting package.
figure(1)
# wait for downx/y
downx = downx.get()
downy = downy.get()
scatter(downx, downy)
xlabel('x')
ylabel('y')
figure(2)
# wait for downpx/y
downpx = downpx.get()
downpy = downpy.get()
scatter(downpx, downpy)
xlabel('px')
ylabel('py')
show()
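
plotting_backend.py itself is not part of this changeset. For orientation, here is
a minimal sketch of what a compatible backend could look like, inferred only from
the names the frontend pulls and gathers (number, d_number, downx, downy, downpx,
downpy); the particle model, counts, and downsampling ratio are illustrative
assumptions, not the actual example code::

    # plotting_backend.py -- hypothetical sketch, runs on every engine.
    # It only needs to define the variables the frontend pulls/gathers.
    import numpy as np

    # Assumption: each engine simulates particles with positions (x, y)
    # and momenta (px, py).
    number = 10000                        # particles on this engine
    x = np.random.standard_normal(number)
    y = np.random.standard_normal(number)
    px = np.random.standard_normal(number)
    py = np.random.standard_normal(number)

    # Downsample so only a fraction of the data is shipped to the frontend.
    step = 100                            # keep 1 point in 100 (assumed ratio)
    downx = x[::step]
    downy = y[::step]
    downpx = px[::step]
    downpy = py[::step]
    d_number = len(downx)                 # downsampled particle count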
task_profiler.py

#!/usr/bin/env python
"""Test the performance of the task farming system.

This script submits a set of tasks via a LoadBalancedView. The tasks
are basically just a time.sleep(t), where t is a random number between
two limits that can be configured at the command line. To run
the script there must first be an IPython controller and engines running::

    ipcluster start -n 16

A good test to run with 16 engines is::

    python task_profiler.py -n 128 -t 0.01 -T 1.0

This should show a speedup of 13-14x. The limitation here is that the
overhead of a single task is about 0.001-0.01 seconds.
"""
import random

from optparse import OptionParser

from IPython.utils.timing import time
from IPython.parallel import Client


def main():
    parser = OptionParser()
    parser.set_defaults(n=100)
    parser.set_defaults(tmin=1e-3)
    parser.set_defaults(tmax=1)
    parser.set_defaults(profile='default')

    parser.add_option("-n", type='int', dest='n',
                      help='the number of tasks to run')
    parser.add_option("-t", type='float', dest='tmin',
                      help='the minimum task length in seconds')
    parser.add_option("-T", type='float', dest='tmax',
                      help='the maximum task length in seconds')
    parser.add_option("-p", '--profile', type='str', dest='profile',
                      help="the cluster profile [default: 'default']")

    (opts, args) = parser.parse_args()
    assert opts.tmax >= opts.tmin, "tmax must not be smaller than tmin"

    # Connect using the requested profile (previously parsed but never used)
    rc = Client(profile=opts.profile)
    view = rc.load_balanced_view()
    print(view)
    rc.block = True
    nengines = len(rc.ids)
    with rc[:].sync_imports():
        from IPython.utils.timing import time

    # the jobs should take a random time within a range
    times = [random.random() * (opts.tmax - opts.tmin) + opts.tmin
             for i in range(opts.n)]
    stime = sum(times)

    print("executing %i tasks, totalling %.1f secs on %i engines" % (opts.n, stime, nengines))
    time.sleep(1)
    start = time.time()
    amr = view.map(time.sleep, times)
    amr.get()
    stop = time.time()

    ptime = stop - start
    scale = stime / ptime

    print("executed %.1f secs in %.1f secs" % (stime, ptime))
    print("%.3fx parallel performance on %i engines" % (scale, nengines))
    print("%.1f%% of theoretical max" % (100 * scale / nengines))


if __name__ == '__main__':
    main()
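
As a sanity check on the quoted 13-14x figure, here is a rough back-of-envelope
model of the expected scale for the suggested test run; the per-task overhead
value is an assumption taken from the docstring's 0.001-0.01 s estimate, and
the model ignores controller scheduling and message latency, which is why it
comes out slightly optimistic::

    # Rough model of the expected speedup for the suggested test run.
    n, nengines = 128, 16
    tmin, tmax = 0.01, 1.0
    overhead = 0.005                    # assumed mean per-task overhead (s)

    mean_task = (tmin + tmax) / 2.0     # task lengths are uniform random
    stime = n * mean_task               # total serial work: ~64.6 s
    ptime = (n / float(nengines)) * (mean_task + overhead)
    print("expected scale: %.1fx" % (stime / ptime))  # ~15.8x vs 13-14x observed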