Show More
@@ -1,61 +1,61 b'' | |||||
"""An example of how to use IPython1 for plotting remote parallel data

The two files plotting_frontend.py and plotting_backend.py go together.

To run this example, first start the IPython controller and 4
engines::

    ipcluster start -n 4

Then start ipython in pylab mode::

    ipython --pylab

Then a simple "run plotting_frontend.py" in IPython will run the
example. When this is done, all the variables (such as number, downx, etc.)
are available in IPython, so for example you can make additional plots.
"""
from __future__ import print_function

import numpy as N
from pylab import *
from IPython.parallel import Client

# Connect to the cluster (uses the default profile's connection file).
rc = Client()
# A DirectView over every engine in the cluster.
view = rc[:]

# Run the simulation on all the engines in parallel.
view.run('plotting_backend.py')

# Bring back the data.  These are all AsyncResult objects: pull() returns
# one value per engine, gather() concatenates the per-engine pieces.
number = view.pull('number')
d_number = view.pull('d_number')
downx = view.gather('downx')
downy = view.gather('downy')
downpx = view.gather('downpx')
downpy = view.gather('downpy')

# AsyncResults are iterable before they complete, so sum() works here;
# iteration blocks on each element as needed.
print("number: ", sum(number))
print("downsampled number: ", sum(d_number))


# Make a scatter plot of the gathered data
# These calls to matplotlib could be replaced by calls to pygist or
# another plotting package.
figure(1)
# wait for downx/y — .get() blocks until the gather has finished
downx = downx.get()
downy = downy.get()
scatter(downx, downy)
xlabel('x')
ylabel('y')
figure(2)
# wait for downpx/y
downpx = downpx.get()
downpy = downpy.get()
scatter(downpx, downpy)
xlabel('px')
ylabel('py')
show()
@@ -1,71 +1,71 b'' | |||||
#!/usr/bin/env python
"""Test the performance of the task farming system.

This script submits a set of tasks via a LoadBalancedView. The tasks
are basically just a time.sleep(t), where t is a random number between
two limits that can be configured at the command line. To run
the script there must first be an IPython controller and engines running::

    ipcluster start -n 16

A good test to run with 16 engines is::

    python task_profiler.py -n 128 -t 0.01 -T 1.0

This should show a speedup of 13-14x.  The limitation here is that the
overhead of a single task is about 0.001-0.01 seconds.
"""
import random, sys
from optparse import OptionParser

from IPython.utils.timing import time
from IPython.parallel import Client

def main():
    """Parse the command line, submit the sleep tasks, report the speedup."""
    parser = OptionParser()
    parser.set_defaults(n=100)
    parser.set_defaults(tmin=1e-3)
    parser.set_defaults(tmax=1)
    parser.set_defaults(profile='default')

    parser.add_option("-n", type='int', dest='n',
                      help='the number of tasks to run')
    parser.add_option("-t", type='float', dest='tmin',
                      help='the minimum task length in seconds')
    parser.add_option("-T", type='float', dest='tmax',
                      help='the maximum task length in seconds')
    parser.add_option("-p", '--profile', type='str', dest='profile',
                      help="the cluster profile [default: 'default']")

    (opts, args) = parser.parse_args()
    # Validate through the parser rather than an assert, which is
    # silently stripped when Python runs with -O.
    if opts.tmax < opts.tmin:
        parser.error("tmax must not be smaller than tmin")

    # FIX: the -p/--profile option was parsed but never used; honor it
    # so the script can target a non-default cluster profile.
    rc = Client(profile=opts.profile)
    view = rc.load_balanced_view()
    print(view)
    rc.block = True
    nengines = len(rc.ids)
    # Make sure `time` is importable on every engine before mapping.
    with rc[:].sync_imports():
        from IPython.utils.timing import time

    # the jobs should take a random time within a range
    times = [random.random()*(opts.tmax-opts.tmin)+opts.tmin for i in range(opts.n)]
    stime = sum(times)

    print("executing %i tasks, totalling %.1f secs on %i engines"%(opts.n, stime, nengines))
    time.sleep(1)
    start = time.time()
    # Fan the sleeps out across the engines and block until all finish.
    amr = view.map(time.sleep, times)
    amr.get()
    stop = time.time()

    ptime = stop-start
    # scale = serial time / parallel time; ideal value is nengines.
    scale = stime/ptime

    print("executed %.1f secs in %.1f secs"%(stime, ptime))
    print("%.3fx parallel performance on %i engines"%(scale, nengines))
    print("%.1f%% of theoretical max"%(100*scale/nengines))


if __name__ == '__main__':
    main()
General Comments 0
You need to be logged in to leave comments.
Login now