1 /* Demonstrate global MPI operations */
/*
 * main: demonstrates MPI collective operations -- barrier synchronization,
 * broadcast, reduce (delivered to rank 0 only) and allreduce (delivered to
 * every rank).
 * NOTE(review): this is a numbered listing fragment; the leading integers on
 * each line are the original listing's line numbers.  Several original lines
 * are outside this excerpt: the #includes, the declaration of rank/size, the
 * assignment of `message` on the root, the presumed `if (rank == 0)` guard
 * before the reduce-result printf, and MPI_Finalize/return -- confirm against
 * the full listing.
 */
10 int main(int argc, char *argv[])
13 double message, local_result, total_result_1, total_result_2;
15 MPI_Init(&argc, &argv);
16 MPI_Comm_rank(MPI_COMM_WORLD, &rank); /* this process's id (rank declared on a line not shown) */
17 MPI_Comm_size(MPI_COMM_WORLD, &size); /* total number of processes (size declared on a line not shown) */
20 * All processes must reach here before continuing. */
21 MPI_Barrier(MPI_COMM_WORLD);
22 printf("synchronized ( node %d )\n" , rank);
23 /* Synchronize again to ensure the "synchronized" messages are contiguous. */
24 MPI_Barrier(MPI_COMm_WORLD);
26 /* An arbitrary message (assigned on the root rank on lines not shown in this excerpt). */
31 /* Broadcast this message: root rank 0 sends `message`; every other rank receives it. */
32 MPI_Bcast(&message, 1, MPI_DOUBLE, 0 /* root */, MPI_COMM_WORLD);
34 /* Check that the message was received: every rank should now print the same value. */
35 printf("node %d -- message %f\n", rank, message);
37 /* Process-dependent result: 2.0 * rank differs on every rank, so the
   reductions below have a distinguishable MAX (2*(size-1)) and SUM. */
38 local_result = 2.0 * rank;
40 /* Reduce operations: combine every rank's local_result; only the target
   process (rank 0) receives the combined value. */
41 MPI_Reduce(&local_result, &total_result_1, 1, MPI_DOUBLE,
42 MPI_MAX, 0 /* target_process */, MPI_COMM_WORLD);
44 MPI_Reduce(&local_result, &total_result_2, 1, MPI_DOUBLE,
45 MPI_SUM, 0 /* target_process */, MPI_COMM_WORLD);
47 /* Only target node 0 has the global results.  NOTE(review): this printf is
   presumably guarded by an `if (rank == 0)` on a line not shown (listing line
   48) -- on other ranks total_result_1/2 are unspecified after MPI_Reduce;
   confirm against the full listing. */
49 printf("results of global operations: %f %f <-- node 0 has results\n",
50 total_result_1, total_result_2 );
52 /* Reduce operation followed by bcast: MPI_Allreduce delivers the combined
   value to every rank, not just the root. */
53 MPI_Allreduce(&local_result, &total_result_1, 1, MPI_DOUBLE,
54 MPI_MAX, MPI_COMM_WORLD);
56 MPI_Allreduce(&local_result, &total_result_2, 1, MPI_DOUBLE,
57 MPI_SUM, MPI_COMM_WORLD);
59 /* All nodes have the results */
60 printf("results of ALLREDUCE operations ( node %d ): %f %f\n",
61 rank, total_result_1, total_result_2 );
63 /* Clean up and exit (MPI_Finalize and return fall past the end of this excerpt). */