<h2 id="sample">Sample Code</h2>
-<p>The
-code <a href="src/global_mpi_operations.c">global_mpi_operations.c</a>
+<p>The code
+<a href="../../src/global_operations/global_mpi_operations.c">global_mpi_operations.c</a>
illustrates the use of the MPI global operation routines. Note the
syntax of the calls.</p>
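+
+<p>For quick reference, the global operations used in that example have
+the following call forms (the variable names here are only illustrative):</p>
+
+<pre>
+MPI_Barrier(MPI_COMM_WORLD);
+MPI_Bcast(&amp;message, 1, MPI_DOUBLE, 0 /* root */, MPI_COMM_WORLD);
+MPI_Reduce(&amp;local, &amp;global, 1, MPI_DOUBLE, MPI_SUM, 0 /* root */, MPI_COMM_WORLD);
+MPI_Allreduce(&amp;local, &amp;global, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+</pre>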
saving factor is.</p>
<p>A skeleton implementation of this algorithm is given in the
-code <a href="src/print_tree.c">print_tree.c</a>.</p>
+code <a href="../../src/global_operations/print_tree.c">print_tree.c</a>.</p>
<p>The transmission tree translates into the following rules (generation
refers to the time sequence of receive/send in the tree —
</tr>
</table>
-<p>The code <a href="src/print_tree.c">print_tree.c</a> implements
-these rules.</p>
+<p>The
+code <a href="../../src/global_operations/print_tree.c">print_tree.c</a>
+implements these rules.</p>
+
+<p>The example code from this section is bundled in
+<a href="../../src/global_operations/global_operations.tar.gz">global_operations.tar.gz</a>.</p>
<!--#include virtual="$root_directory/shared/footer.shtml"-->
--- /dev/null
+# General Environment
+
+RM = /bin/rm
+
+# Non-MPI Environment
+
+CC = /usr/bin/gcc
+CFLAGS =
+LD = $(CC)
+LDFLAGS =
+EXECS = print_tree
+
+# MPI Environment
+
+MPI_CC = /usr/bin/mpicc
+MPI_CFLAGS =
+MPI_LD = $(MPI_CC)
+MPI_LDFLAGS =
+MPI_EXECS = broadcast global_mpi_operations
+
+# Top level targets
+
+all: $(EXECS) $(MPI_EXECS)
+
+clean:
+ $(RM) -f *.o $(EXECS) $(MPI_EXECS)
+
+# Non-MPI rules
+
+$(EXECS:%=%.o) : %.o : %.c
+ $(CC) -c $(CFLAGS) -o $@ $^
+
+$(EXECS) : % : %.o
+ $(LD) $(LDFLAGS) -o $@ $^
+
+# MPI rules
+
+$(MPI_EXECS:%=%.o) : %.o : %.c
+	$(MPI_CC) -c $(MPI_CFLAGS) -o $@ $^
+
+$(MPI_EXECS) : % : %.o
+	$(MPI_LD) $(MPI_LDFLAGS) -o $@ $^
+
+# Interesting sections of the GNU Make manual
+# 4.12.1 Syntax of Static Pattern Rules
+# 6.3.1 Substitution References
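+
+# Rough, standalone illustration (not part of this build) of the static
+# pattern rule syntax used above, targets : target-pattern : prereq-pattern:
+#
+#   objs = foo.o bar.o
+#   $(objs) : %.o : %.c
+#       $(CC) -c $(CFLAGS) -o $@ $<
+#
+# Each word in 'objs' is matched against %.o and the stem is substituted
+# into %.c to name the prerequisite; $@ is the target, $< the matched .c file.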
--- /dev/null
+global_operations
+=================
+
+Assorted useful examples of global MPI operations.
+
+Manifest
+--------
+
+======================= ==============================================
+README This file.
+Makefile Automate building and cleanup.
+global_mpi_operations.c Demonstrate assorted global MPI operations.
+broadcast.c Example broadcast implementation.
+print_tree.c Skeleton binary tree distribution code.
+======================= ==============================================
+
+Build
+-----
+
+Just run
+
+ $ make
+
+which builds the serial and MPI example programs.
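+
+The compiler paths are hard-coded at the top of the Makefile; if yours
+differ, they can be overridden on the command line, for example (the
+paths shown are only examples)
+
+    $ make CC=cc MPI_CC=mpicc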
+
+Usage
+-----
+
+Serial code
+
+ $ ./print_tree
+
+Parallel code
+
+ $ mpdboot -f mpd.hosts
+ $ mpiexec -n 4 ./global_mpi_operations
+ $ mpiexec -n 4 ./broadcast
+ $ mpdallexit
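+
+The mpdboot/mpdallexit steps apply to the older MPD process manager;
+with a recent MPICH (Hydra) or with Open MPI they are unnecessary and
+the programs can be launched directly, e.g.
+
+    $ mpiexec -n 4 ./global_mpi_operations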
--- /dev/null
+/* simple broadcast logic demonstrations */
+
+/* Michel Vallieres */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <mpi.h>
+
+
+#define TAG_BCAST 123
+
+
+void my_broadcast(int rank, int size, double *message);
+
+int main(int argc, char *argv[])
+{
+ int rank, size;
+ double message;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ /* an arbitrary message */
+ message = 0.0;
+ if (rank == 0)
+ message = 5.6789;
+
+ /* local broadcast */
+ my_broadcast(rank, size, &message);
+
+ /* check that nodes received message */
+ printf ("after broadcast -- node %d -- message %f\n", rank, message);
+
+ /* end of code */
+ MPI_Finalize();
+ return EXIT_SUCCESS;
+}
+
+
+void my_broadcast(int rank, int size, double *message)
+{
+ int level, maxlevel;
+ int shift_ip, n_target;
+ int *target_process;
+ int from_process, target;
+ MPI_Status recv_status;
+
+ /* build transmission tree */
+
+ /* size of binary tree */
+ maxlevel = 0;
+ while ((1 << (maxlevel + 1)) < size)
+ maxlevel++;
+ /* '<<' is a bitwise shift left, so 1 << b == pow(2, b) */
+
+ /* make space for local branch of tree */
+ target_process = (int *)malloc((unsigned) (size * sizeof(int)));
+
+ /* build the branches */
+ n_target = 0;
+ from_process = -1;
+ for (level=0; level <= maxlevel; level++)
+ {
+ shift_ip = 1 << level;
+ if (rank >= shift_ip)
+ from_process = rank - shift_ip;
+ if (rank < shift_ip)
+ {
+ target = rank + shift_ip;
+ if (target < size)
+ {
+ target_process[n_target] = target;
+ n_target++;
+ }
+ }
+ }
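+
+ /* Example (derived from the loop above): with size = 8 the transmission
+  * pattern is
+  *   level 0:  0 -> 1
+  *   level 1:  0 -> 2,  1 -> 3
+  *   level 2:  0 -> 4,  1 -> 5,  2 -> 6,  3 -> 7
+  * so every rank > 0 receives exactly once, from rank - 2^floor(log2(rank)). */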
+
+ /* debugging output */
+ fprintf(stderr, "process %d -- from_process %d -- %d targets\n",
+ rank, from_process, n_target);
+ if (n_target > 0)
+ {
+ for (target=0; target < n_target; target++)
+ fprintf(stderr, "process %d -- target %d\n",
+ rank, target_process[target]);
+ }
+
+ /* message transmission */
+
+ /* receive message */
+ if (rank > 0)
+ {
+ fprintf(stderr, "--- receiving %d %d \n", rank, from_process);
+ fflush(stderr);
+ MPI_Recv(message, 1, MPI_DOUBLE, from_process, TAG_BCAST,
+ MPI_COMM_WORLD, &recv_status);
+ }
+
+ /* send message to all target processes */
+ if (n_target > 0)
+ {
+ fprintf(stderr, "--- sending %d %d \n", rank, n_target);
+ for (target=0 ; target < n_target; target++)
+ MPI_Ssend(message, 1, MPI_DOUBLE, target_process[target],
+ TAG_BCAST, MPI_COMM_WORLD);
+ }
+
+ /* free up space */
+ free(target_process);
+}
--- /dev/null
+/* Demonstrate global MPI operations */
+
+/* Michel Vallieres */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <mpi.h>
+
+
+int main(int argc, char *argv[])
+{
+ int rank, size;
+ double message, local_result, total_result_1, total_result_2;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ /* Synchronization:
+  * all processes must reach this point before continuing. */
+ MPI_Barrier(MPI_COMM_WORLD);
+ printf("synchronized ( node %d )\n", rank);
+ /* Barrier again so that later output is less likely to interleave with
+  * the "synchronized" messages (MPI does not guarantee stdout ordering). */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* An arbitrary message */
+ message = 0;
+ if (rank == 0)
+ message = 5.6789;
+
+ /* Broadcast this message */
+ MPI_Bcast(&message, 1, MPI_DOUBLE, 0 /* root */, MPI_COMM_WORLD);
+
+ /* Check if message received */
+ printf("node %d -- message %f\n", rank, message);
+
+ /* Process dependent result */
+ local_result = 2.0 * rank;
+
+ /* Reduce operations */
+ MPI_Reduce(&local_result, &total_result_1, 1, MPI_DOUBLE,
+ MPI_MAX, 0 /* target_process */, MPI_COMM_WORLD);
+
+ MPI_Reduce(&local_result, &total_result_2, 1, MPI_DOUBLE,
+ MPI_SUM, 0 /* target_process */, MPI_COMM_WORLD);
+
+ /* Only target node 0 has the global results */
+ if ( rank == 0 )
+ printf("results of global operations: %f %f <-- node 0 has results\n",
+ total_result_1, total_result_2 );
+
+ /* Reduce operation followed by bcast. */
+ MPI_Allreduce(&local_result, &total_result_1, 1, MPI_DOUBLE,
+ MPI_MAX, MPI_COMM_WORLD);
+
+ MPI_Allreduce(&local_result, &total_result_2, 1, MPI_DOUBLE,
+ MPI_SUM, MPI_COMM_WORLD);
+
+ /* All nodes have the results */
+ printf("results of ALLREDUCE operations ( node %d ): %f %f\n",
+ rank, total_result_1, total_result_2 );
+
+ /* Clean up and exit */
+ MPI_Finalize();
+ return EXIT_SUCCESS;
+}
--- /dev/null
+/* skeleton for a broadcast routine from node 0 */
+
+#include <stdio.h>
+
+int main(int argc, char *argv[])
+{
+    int two_to_generation;
+    int rank, size;
+    int to, from;
+
+    /* scan over a hypothetical virtual machine of 15 nodes */
+    size = 15;
+    for (rank = 0; rank < size; rank++)
+    {
+        printf("rank %d", rank);
+
+        /* two_to_generation reflects the steps in the tree broadcast */
+        two_to_generation = 1;
+        while (two_to_generation < size)
+        {
+            /* receive message */
+            if (rank >= two_to_generation && rank < two_to_generation*2)
+            {
+                from = rank - two_to_generation;
+                if (from < size)
+                    printf(" -- from %d", from);
+            }
+            /* send message */
+            if (rank < two_to_generation)
+            {
+                to = rank + two_to_generation;
+                if (to < size)
+                    printf(" -- to %d", to);
+            }
+            two_to_generation = 2 * two_to_generation;
+        }
+        /* done for a given rank */
+        printf("\n");
+    }
+    return 0;
+}
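+
+/* Expected first lines of output (size = 15), as a check of the tree rules:
+ *   rank 0 -- to 1 -- to 2 -- to 4 -- to 8
+ *   rank 1 -- from 0 -- to 3 -- to 5 -- to 9
+ *   rank 2 -- from 0 -- to 6 -- to 10
+ *   ...
+ */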
+++ /dev/null
-/****************************************************
-* *
-* simple broadcast logic demonstrations *
-* *
-*****************************************************/
- /* Michel Vallieres */
-
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-#include <math.h>
-#include <mpi.h>
-
-void my_broadcast( int myid, int numprocs, double *message );
-
-int main ( int argc, char *argv[] )
-{
- int myid, numprocs;
- double message;
-
- /* join the MPI virtual machine */
- MPI_Init(&argc, &argv);
- MPI_Comm_rank(MPI_COMM_WORLD, &myid);
- MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
-
- /* an arbitrary message */
- message = 0.0;
- if ( myid == 0 ) message = 5.6788;
-
- /* local broadcast */
- my_broadcast( myid, numprocs, &message );
-
- /* check that nodes received message */
- printf ( " After broadcast -- Node: %d -- message: %f \n", myid, message );
-
- /* end of code */
- MPI_Finalize();
- exit(1);
-
-}
-
-
-void my_broadcast( int myid, int numprocs, double *message )
-{
- int level, maxlevel;
- int shift_ip, n_target;
- int *target_process;
- int from_process, target;
- MPI_Status recv_status;
-
- /* build transmission tree */
-
- /* size of binary tree */
- maxlevel = 0;
- while ( pow(2, maxlevel+1) < numprocs )
- maxlevel++ ;
- /* make space for local branch of tree */
- target_process = (int *)malloc( (unsigned) (numprocs * sizeof(int) ) );
- /* build the branches */
- n_target = 0;
- from_process = -1;
- n_target = 0;
- for ( level = 0; level <= maxlevel ; level++ )
- {
- shift_ip = pow( 2, level );
- if ( myid >= shift_ip )
- from_process = myid - shift_ip;
- if ( myid < shift_ip )
- {
- target = myid + shift_ip;
- if ( target < numprocs )
- {
- target_process[n_target] = target;
- n_target++;
- }
- }
- }
- /* debugging output */
- fprintf( stderr, " process: %d -- from_process: %d -- # of targets: %d\n",
- myid, from_process, n_target);
- if ( n_target > 0 )
- {
- for ( target=0 ; target<n_target ; target++ )
- fprintf( stderr, " process: %d -- target: %d \n", myid, target_process[target] );
- }
-
- /* message transmission */
-
- /* receive message */
- if ( myid > 0 )
- {
- fprintf( stderr, " --- receiving %d %d \n", myid, from_process );
- fflush( stderr );
- MPI_Recv( message, 1, MPI_DOUBLE, from_process, 121,
- MPI_COMM_WORLD, &recv_status );
- }
-
- /* send message to all target processes */
- if ( n_target > 0 )
- {
- fprintf( stderr, " --- sending %d %d \n", myid, n_target );
- for ( target=0 ; target<n_target ; target++ )
- MPI_Ssend( message, 1, MPI_DOUBLE, target_process[target],
- 121, MPI_COMM_WORLD );
- }
- /* free up space */
- free( (char *)target_process );
-}
-
-
-
-
-
-
-
-
-
+++ /dev/null
-/****************************************************
-* *
-* demonstration of global MPI operations *
-* *
-*****************************************************/
- /* Michel Vallieres */
-
-#include <stdio.h>
-#include <mpi.h>
-
-int main ( int argc, char *argv[] )
-{
- int myid, numprocs;
- double message, local_result, total_result_1, total_result_2;
-
- /* join the MPI virtual machine */
- MPI_Init(&argc, &argv);
- MPI_Comm_rank(MPI_COMM_WORLD, &myid);
- MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
-
- /* synchronization */
- /* all process must reach */
- /* here before continuying */
- MPI_Barrier( MPI_COMM_WORLD );
- printf( " We are synchronized ( node %d ) \n" , myid );
-
- /* an arbitrary message */
- message = 0;
- if (myid == 0)
- message = 5.6788;
-
- /* broadcast this message */
- MPI_Bcast( &message, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD );
-
- /* check if message received */
- printf( " Mynode: %d -- message: %f \n", myid, message );
-
- /* process dependent result */
- local_result = 2.0*myid;
-
- /* reduce operations */
- MPI_Reduce ( &local_result, &total_result_1, 1, MPI_DOUBLE,
- MPI_MAX, 0, MPI_COMM_WORLD );
-
- MPI_Reduce ( &local_result, &total_result_2, 1, MPI_DOUBLE,
- MPI_SUM, 0, MPI_COMM_WORLD );
-
- /* only target node - 0 - has the global results */
- if ( myid == 0 )
- printf( " Results of global operations: %f %f <-- node 0 has results\n",
- total_result_1, total_result_2 );
-
- /* reduce operation */
- /* followed by bcast */
- MPI_Allreduce ( &local_result, &total_result_1, 1, MPI_DOUBLE,
- MPI_MAX, MPI_COMM_WORLD );
-
- MPI_Allreduce ( &local_result, &total_result_2, 1, MPI_DOUBLE,
- MPI_SUM, MPI_COMM_WORLD );
-
- /* all nodes have the results */
- printf( " Results of ALLREDUCE operations (node: %d ): %f %f \n",
- myid, total_result_1, total_result_2 );
-
- /* end of code */
- MPI_Finalize();
- exit(1);
-
-}
+++ /dev/null
-\r
-\r
- /* skeleton for a broadcast routine from node 0 */\r
-\r
-#include <stdio.h>\r
-#include <math.h>\r
-\r
-int main ( )\r
-{\r
- int two_to_generation;\r
- int rank, size;\r
- int to, from;\r
-\r
- /* scan over hypotetical virtual */\r
- /* machine of 15 nodes */\r
- size = 15; \r
- for ( rank =0 ; rank<size ; rank++ )\r
- {\r
- printf( " rank = %d ", rank );\r
- /* two_to_generation reflects the */ \r
- /* steps in the tree broadcast */\r
- two_to_generation = 1;\r
- while( two_to_generation < size )\r
- { /* receive message */\r
- if ( rank >= two_to_generation && \r
- rank < two_to_generation*2 )\r
- {\r
- from = rank - two_to_generation;\r
- if ( from < size )\r
- printf ( " -- from %d ", from );\r
- }\r
- if ( rank < two_to_generation )\r
- { /* send message */\r
- to = rank + two_to_generation;\r
- if ( to < size )\r
- printf( " -- to %d ", to );\r
- }\r
- two_to_generation = 2 * two_to_generation;\r
- }\r
- /* done for a given rank */\r
- printf("\n");\r
- }\r
-}\r