@@ -8,8 +8,8 @@ are done by an object of class MPIRectPartitioner2D
 An example of running the program is (8 processors, 4x2 partition,
 400x100 grid cells)::
 
-    $ ipcluster
-    $
+    $ ipcluster start --engines=MPIExec -n 8 # start 8 engines with mpiexec
+    $ python parallelwave-mpi.py --grid 400 100 --partition 4 2
 
 See also parallelwave-mpi, which runs the same program, but uses MPI
 (via mpi4py) for the inter-engine communication.
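
The updated docstring starts the engines with ipcluster and then runs the driver script against them. As a rough illustration of how a client would confirm those engines are up before submitting work (a minimal sketch assuming the IPython.parallel Client API these examples target; the variable names are illustrative, not taken from the script)::

    from IPython.parallel import Client

    rc = Client()            # connect to the controller started by `ipcluster start`
    print(len(rc.ids))       # should eventually report 8 registered engines
    view = rc[:]             # a DirectView over all engines, suitable for apply_async
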
@@ -179,7 +179,7 @@ if __name__ == '__main__':
 
     # run again with numpy vectorized inner-implementation
     t0 = time.time()
-    ar = view.apply_async(_solve, tstop, dt=0, verbose=True, final_test=final_test)
+    ar = view.apply_async(_solve, tstop, dt=0, verbose=True, final_test=final_test, user_action=user_action)
     if final_test:
         # this sum is performed element-wise as results finish
         s = sum(ar)
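
The functional change in both files is the same: the user_action callback is now forwarded to _solve through apply_async, which passes keyword arguments on to the remote function unchanged, and the returned AsyncResult is iterable, so sum(ar) consumes the per-engine results as they finish (as the in-code comment notes). A hypothetical, simplified illustration of that calling pattern, using a made-up stand-in for _solve::

    from IPython.parallel import Client

    rc = Client()
    view = rc[:]

    def fake_solve(tstop, dt=0, verbose=False, final_test=False, user_action=None):
        # stand-in for _solve: pretend to integrate, optionally invoke the callback
        if user_action is not None:
            user_action(tstop)
        return tstop * tstop

    ar = view.apply_async(fake_solve, 1.0, dt=0, verbose=True,
                          final_test=True, user_action=None)
    print(sum(ar))   # summed element-wise over per-engine results as they arrive
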
@@ -8,8 +8,8 @@ are done by an object of class ZMQRectPartitioner2D
 An example of running the program is (8 processors, 4x2 partition,
 200x200 grid cells)::
 
-    $ ipcluster
-    $
+    $ ipcluster start -n 8 # start 8 engines
+    $ python parallelwave.py --grid 200 200 --partition 4 2
 
 See also parallelwave-mpi, which runs the same program, but uses MPI
 (via mpi4py) for the inter-engine communication.
@@ -188,7 +188,7 @@ if __name__ == '__main__':
 
     t0 = time.time()
 
-    ar = view.apply_async(_solve, tstop, dt=0, verbose=True, final_test=final_test)
+    ar = view.apply_async(_solve, tstop, dt=0, verbose=True, final_test=final_test, user_action=user_action)
     if final_test:
         # this sum is performed element-wise as results finish
         s = sum(ar)
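
For reference, user_action appears to be the solver's optional per-timestep callback (None disables it). Its exact call signature is not visible in this diff; the sketch below assumes a (u, x, y, t) signature and a NumPy array for u, and is purely illustrative::

    class WaveMonitor(object):
        """Hypothetical user_action: record the peak amplitude at each step."""
        def __init__(self):
            self.peaks = []

        def __call__(self, u, x, y, t):
            # u is assumed to be the current solution array on this engine
            self.peaks.append(float(abs(u).max()))

    user_action = WaveMonitor()   # would be forwarded to _solve via apply_async as above
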