From: W. Trevor King
Date: Thu, 16 Sep 2010 04:01:53 +0000 (-0400)
Subject: Cleaned up src/global_operations/ (was src/print_tree/).
X-Git-Url: http://git.tremily.us/?a=commitdiff_plain;h=f9c630eb32f420c40b5b59cd1849c2f8a794a44a;p=parallel_computing.git
Cleaned up src/global_operations/ (was src/print_tree/).
---
diff --git a/content/global_operations/index.shtml b/content/global_operations/index.shtml
index 73e0a9b..4d9769e 100644
--- a/content/global_operations/index.shtml
+++ b/content/global_operations/index.shtml
@@ -97,8 +97,8 @@ broadcast the results to all the nodes in the communicator.
Sample Code
-The
-code global_mpi_operations.c
+
The code
+global_mpi_operations.c
illustrates the use of the MPI global operation routines. Note the
syntax of the calls.
@@ -128,7 +128,7 @@ info. The larger the number of processors is, more significant the
saving factor is.
A skeleton implementation of this algorithm is given in the
-code print_tree.c.
+code print_tree.c.
The transmission tree translates into the following rules (generation
refers to the time sequence of receive/send in the tree —
@@ -154,7 +154,11 @@ to 7).
-The code print_tree.c implements
-these rules.
+The
+code print_tree.c
+implements these rules.
+
+The example code from this section is bundled in
+global_operations.tar.gz.
diff --git a/src/print_tree/.htaccess b/src/global_operations/.htaccess
similarity index 100%
rename from src/print_tree/.htaccess
rename to src/global_operations/.htaccess
diff --git a/src/print_tree/.make_tar b/src/global_operations/.make_tar
similarity index 100%
rename from src/print_tree/.make_tar
rename to src/global_operations/.make_tar
diff --git a/src/global_operations/Makefile b/src/global_operations/Makefile
new file mode 100644
index 0000000..8334fd5
--- /dev/null
+++ b/src/global_operations/Makefile
@@ -0,0 +1,46 @@
+# General Environment
+
+RM = /bin/rm
+
+# Non-MPI Environment
+
+CC = /usr/bin/gcc
+CFLAGS =
+LD = $(CC)
+LDFLAGS =
+EXECS = print_tree
+
+# MPI Environment
+
+MPI_CC = /usr/bin/mpicc
+MPI_CFLAGS =
+MPI_LD = $(MPI_CC)
+MPI_LDFLAGS =
+MPI_EXECS = broadcast global_mpi_operations
+
+# Top level targets
+
+all: $(EXECS) $(MPI_EXECS)
+
+clean:
+ $(RM) -f *.o $(EXECS) $(MPI_EXECS)
+
+# Non-MPI rules
+
+$(EXECS:%=%.o) : %.o : %.c
+ $(CC) -c $(CFLAGS) -o $@ $^
+
+$(EXECS) : % : %.o
+ $(LD) $(LDFLAGS) -o $@ $^
+
+# MPI rules
+
+$(MPI_EXECS:%=%.o) : %.o : %.c
+ $(MPI_CC) -c $(CFLAGS) -o $@ $^
+
+$(MPI_EXECS) : % : %.o
+ $(MPI_LD) $(LDFLAGS) -o $@ $^
+
+# Interesting Makefile sections
+# 4.12.1 Syntax of Static Pattern Rules
+# 6.3.1 Substitution References
diff --git a/src/global_operations/README b/src/global_operations/README
new file mode 100644
index 0000000..dbafc6c
--- /dev/null
+++ b/src/global_operations/README
@@ -0,0 +1,38 @@
+global_operations
+=================
+
+Assorted useful examples of global MPI operations.
+
+Manifest
+--------
+
+======================= ==============================================
+README This file.
+Makefile Automate building and cleanup.
+global_mpi_operations.c Demonstrate assorted global MPI operations.
+broadcast.c Example broadcast implementation.
+print_tree.c Skeleton binary tree distribution code.
+======================= ==============================================
+
+Build
+-----
+
+Just run
+
+ $ make
+
+which builds all of the example executables.
+
+Usage
+-----
+
+Serial code
+
+ $ ./print_tree
+
+Parallel code
+
+ $ mpdboot -f mpd.hosts
+ $ mpiexec -n 4 ./global_mpi_operations
+ $ mpiexec -n 4 ./broadcast
+ $ mpdallexit
diff --git a/src/global_operations/broadcast.c b/src/global_operations/broadcast.c
new file mode 100644
index 0000000..11204db
--- /dev/null
+++ b/src/global_operations/broadcast.c
@@ -0,0 +1,113 @@
+/* simple broadcast logic demonstrations */
+
+/* Michel Vallieres */
+
+#include
+#include
+#include
+#include
+
+
+#define TAG_BCAST 123
+
+
+void my_broadcast(int rank, int size, double *message);
+
+int main(int argc, char *argv[])
+{
+ int rank, size;
+ double message;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ /* an arbitrary message */
+ message = 0.0;
+ if (rank == 0)
+ message = 5.6789;
+
+ /* local broadcast */
+ my_broadcast(rank, size, &message);
+
+ /* check that nodes received message */
+ printf ("after broadcast -- node %d -- message %f\n", rank, message);
+
+ /* end of code */
+ MPI_Finalize();
+ return EXIT_SUCCESS;
+}
+
+
+void my_broadcast(int rank, int size, double *message)
+{
+ int level, maxlevel;
+ int shift_ip, n_target;
+ int *target_process;
+ int from_process, target;
+ MPI_Status recv_status;
+
+ /* build transmission tree */
+
+ /* size of binary tree */
+ maxlevel = 0;
+ while (1 << maxlevel+1 < size)
+ maxlevel++;
+ /* '<<' is a bitwise shift left, so 1 << b == pow(2, b) */
+
+ /* make space for local branch of tree */
+ target_process = (int *)malloc((unsigned) (size * sizeof(int)));
+
+ /* build the branches */
+ n_target = 0;
+ from_process = -1;
+ n_target = 0;
+ for (level=0; level <= maxlevel; level++)
+ {
+ shift_ip = 1 << level;
+ if (rank >= shift_ip)
+ from_process = rank - shift_ip;
+ if (rank < shift_ip)
+ {
+ target = rank + shift_ip;
+ if (target < size)
+ {
+ target_process[n_target] = target;
+ n_target++;
+ }
+ }
+ }
+
+ /* debugging output */
+ fprintf(stderr, "process %d -- from_process %d -- %d targets\n",
+ rank, from_process, n_target);
+ if (n_target > 0)
+ {
+ for (target=0; target < n_target; target++)
+ fprintf(stderr, "process %d -- target %d\n",
+ rank, target_process[target]);
+ }
+
+ /* message transmission */
+
+ /* receive message */
+ if (rank > 0)
+ {
+ fprintf(stderr, "--- receiving %d %d \n", rank, from_process);
+ fflush(stderr);
+ MPI_Recv(message, 1, MPI_DOUBLE, from_process, TAG_BCAST,
+ MPI_COMM_WORLD, &recv_status);
+ }
+
+ /* send message to all target processes */
+ if (n_target > 0)
+ {
+ fprintf(stderr, "--- sending %d %d \n", rank, n_target);
+ for (target=0 ; target < n_target; target++)
+ MPI_Ssend(message, 1, MPI_DOUBLE, target_process[target],
+ TAG_BCAST, MPI_COMM_WORLD);
+ }
+
+ /* free up space */
+ free(target_process);
+}
diff --git a/src/global_operations/global_mpi_operations.c b/src/global_operations/global_mpi_operations.c
new file mode 100644
index 0000000..405e694
--- /dev/null
+++ b/src/global_operations/global_mpi_operations.c
@@ -0,0 +1,66 @@
+/* Demonstrate global MPI operations */
+
+/* Michel Vallieres */
+
+#include
+#include
+#include
+
+
+int main(int argc, char *argv[])
+{
+ int rank, size;
+ double message, local_result, total_result_1, total_result_2;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ /* Synchronization.
+ * All process must reach here before continuing. */
+ MPI_Barrier(MPI_COMM_WORLD);
+ printf("synchronized ( node %d )\n" , rank);
+ /* Synchronize again to ensure the "synchronized" messages are contiguous. */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* An arbitrary message */
+ message = 0;
+ if (rank == 0)
+ message = 5.6789;
+
+ /* Broadcast this message */
+ MPI_Bcast(&message, 1, MPI_DOUBLE, 0 /* root */, MPI_COMM_WORLD);
+
+ /* Check if message received */
+ printf("node %d -- message %f\n", rank, message);
+
+ /* Process dependent result */
+ local_result = 2.0 * rank;
+
+ /* Reduce operations */
+ MPI_Reduce(&local_result, &total_result_1, 1, MPI_DOUBLE,
+ MPI_MAX, 0 /* target_process */, MPI_COMM_WORLD);
+
+ MPI_Reduce(&local_result, &total_result_2, 1, MPI_DOUBLE,
+ MPI_SUM, 0 /* target_process */, MPI_COMM_WORLD);
+
+ /* Only target node 0 has the global results */
+ if ( rank == 0 )
+ printf("results of global operations: %f %f <-- node 0 has results\n",
+ total_result_1, total_result_2 );
+
+ /* Reduce operation followed by bcast. */
+ MPI_Allreduce(&local_result, &total_result_1, 1, MPI_DOUBLE,
+ MPI_MAX, MPI_COMM_WORLD);
+
+ MPI_Allreduce(&local_result, &total_result_2, 1, MPI_DOUBLE,
+ MPI_SUM, MPI_COMM_WORLD);
+
+ /* All nodes have the results */
+ printf("results of ALLREDUCE operations ( node %d ): %f %f\n",
+ rank, total_result_1, total_result_2 );
+
+ /* Clean up and exit */
+ MPI_Finalize();
+ exit(1);
+}
diff --git a/src/global_operations/print_tree.c b/src/global_operations/print_tree.c
new file mode 100644
index 0000000..e08157b
--- /dev/null
+++ b/src/global_operations/print_tree.c
@@ -0,0 +1,41 @@
+/* skeleton for a broadcast routine from node 0 */
+
+#include
+#include
+
+int main(int argc, char *argv[])
+{
+ int two_to_generation;
+ int rank, size;
+ int to, from;
+
+ /* scan over a hypothetical virtual machine of 15 nodes */
+ size = 15;
+ for (rank=0; rank < size; rank++)
+ {
+ printf("rank %d", rank);
+
+ /* two_to_generation reflects the steps in the tree broadcast */
+ two_to_generation = 1;
+ while(two_to_generation < size)
+ {
+ /* receive message */
+ if (rank >= two_to_generation && rank < two_to_generation*2)
+ {
+ from = rank - two_to_generation;
+ if ( from < size )
+ printf(" -- from %d", from);
+ }
+ /* send message */
+ if (rank < two_to_generation)
+ {
+ to = rank + two_to_generation;
+ if ( to < size )
+ printf(" -- to %d", to);
+ }
+ two_to_generation = 2 * two_to_generation;
+ }
+ /* done for a given rank */
+ printf("\n");
+ }
+}
diff --git a/src/print_tree/broadcast.c b/src/print_tree/broadcast.c
deleted file mode 100644
index 9ebc7da..0000000
--- a/src/print_tree/broadcast.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/****************************************************
-* *
-* simple broadcast logic demonstrations *
-* *
-*****************************************************/
- /* Michel Vallieres */
-
-#include
-#include
-#include
-#include
-#include
-
-void my_broadcast( int myid, int numprocs, double *message );
-
-int main ( int argc, char *argv[] )
-{
- int myid, numprocs;
- double message;
-
- /* join the MPI virtual machine */
- MPI_Init(&argc, &argv);
- MPI_Comm_rank(MPI_COMM_WORLD, &myid);
- MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
-
- /* an arbitrary message */
- message = 0.0;
- if ( myid == 0 ) message = 5.6788;
-
- /* local broadcast */
- my_broadcast( myid, numprocs, &message );
-
- /* check that nodes received message */
- printf ( " After broadcast -- Node: %d -- message: %f \n", myid, message );
-
- /* end of code */
- MPI_Finalize();
- exit(1);
-
-}
-
-
-void my_broadcast( int myid, int numprocs, double *message )
-{
- int level, maxlevel;
- int shift_ip, n_target;
- int *target_process;
- int from_process, target;
- MPI_Status recv_status;
-
- /* build transmission tree */
-
- /* size of binary tree */
- maxlevel = 0;
- while ( pow(2, maxlevel+1) < numprocs )
- maxlevel++ ;
- /* make space for local branch of tree */
- target_process = (int *)malloc( (unsigned) (numprocs * sizeof(int) ) );
- /* build the branches */
- n_target = 0;
- from_process = -1;
- n_target = 0;
- for ( level = 0; level <= maxlevel ; level++ )
- {
- shift_ip = pow( 2, level );
- if ( myid >= shift_ip )
- from_process = myid - shift_ip;
- if ( myid < shift_ip )
- {
- target = myid + shift_ip;
- if ( target < numprocs )
- {
- target_process[n_target] = target;
- n_target++;
- }
- }
- }
- /* debugging output */
- fprintf( stderr, " process: %d -- from_process: %d -- # of targets: %d\n",
- myid, from_process, n_target);
- if ( n_target > 0 )
- {
- for ( target=0 ; target 0 )
- {
- fprintf( stderr, " --- receiving %d %d \n", myid, from_process );
- fflush( stderr );
- MPI_Recv( message, 1, MPI_DOUBLE, from_process, 121,
- MPI_COMM_WORLD, &recv_status );
- }
-
- /* send message to all target processes */
- if ( n_target > 0 )
- {
- fprintf( stderr, " --- sending %d %d \n", myid, n_target );
- for ( target=0 ; target
-#include
-
-int main ( int argc, char *argv[] )
-{
- int myid, numprocs;
- double message, local_result, total_result_1, total_result_2;
-
- /* join the MPI virtual machine */
- MPI_Init(&argc, &argv);
- MPI_Comm_rank(MPI_COMM_WORLD, &myid);
- MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
-
- /* synchronization */
- /* all process must reach */
- /* here before continuying */
- MPI_Barrier( MPI_COMM_WORLD );
- printf( " We are synchronized ( node %d ) \n" , myid );
-
- /* an arbitrary message */
- message = 0;
- if (myid == 0)
- message = 5.6788;
-
- /* broadcast this message */
- MPI_Bcast( &message, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD );
-
- /* check if message received */
- printf( " Mynode: %d -- message: %f \n", myid, message );
-
- /* process dependent result */
- local_result = 2.0*myid;
-
- /* reduce operations */
- MPI_Reduce ( &local_result, &total_result_1, 1, MPI_DOUBLE,
- MPI_MAX, 0, MPI_COMM_WORLD );
-
- MPI_Reduce ( &local_result, &total_result_2, 1, MPI_DOUBLE,
- MPI_SUM, 0, MPI_COMM_WORLD );
-
- /* only target node - 0 - has the global results */
- if ( myid == 0 )
- printf( " Results of global operations: %f %f <-- node 0 has results\n",
- total_result_1, total_result_2 );
-
- /* reduce operation */
- /* followed by bcast */
- MPI_Allreduce ( &local_result, &total_result_1, 1, MPI_DOUBLE,
- MPI_MAX, MPI_COMM_WORLD );
-
- MPI_Allreduce ( &local_result, &total_result_2, 1, MPI_DOUBLE,
- MPI_SUM, MPI_COMM_WORLD );
-
- /* all nodes have the results */
- printf( " Results of ALLREDUCE operations (node: %d ): %f %f \n",
- myid, total_result_1, total_result_2 );
-
- /* end of code */
- MPI_Finalize();
- exit(1);
-
-}
diff --git a/src/print_tree/print_tree.c b/src/print_tree/print_tree.c
deleted file mode 100644
index 8152d5f..0000000
--- a/src/print_tree/print_tree.c
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
- /* skeleton for a broadcast routine from node 0 */
-
-#include
-#include
-
-int main ( )
-{
- int two_to_generation;
- int rank, size;
- int to, from;
-
- /* scan over hypotetical virtual */
- /* machine of 15 nodes */
- size = 15;
- for ( rank =0 ; rank= two_to_generation &&
- rank < two_to_generation*2 )
- {
- from = rank - two_to_generation;
- if ( from < size )
- printf ( " -- from %d ", from );
- }
- if ( rank < two_to_generation )
- { /* send message */
- to = rank + two_to_generation;
- if ( to < size )
- printf( " -- to %d ", to );
- }
- two_to_generation = 2 * two_to_generation;
- }
- /* done for a given rank */
- printf("\n");
- }
-}