#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int process_Rank, size_Of_Comm;
    double distro_Array[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}; // data to be distributed
    int N = sizeof(distro_Array) / sizeof(distro_Array[0]);

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size_Of_Comm);
    MPI_Comm_rank(MPI_COMM_WORLD, &process_Rank);

    double scattered_Data[2];
    int i;
|
    if (process_Rank == 0)
    {
        /* Rank 0 splits distro_Array into chunks of two elements and sends one
           chunk to each worker rank that still has data left to work on. */
        for (i = 1; i < size_Of_Comm; i++)
        {
            if (2 * i <= N)
            {
                printf("scattering data %f\n", *(distro_Array + 2 * (i - 1)));
                MPI_Send(
                    distro_Array + 2 * (i - 1), // Address of the chunk we are sending.
                    2,                          // Number of elements sent from that address.
                    MPI_DOUBLE,                 // MPI type of the message we are sending.
                    i,                          // Rank of the receiving process.
                    1,                          // Message tag.
                    MPI_COMM_WORLD              // MPI communicator.
                );
            }
        }
    }
|
    else
    {
        printf("Process %d is waiting for data\n", process_Rank);
        if (2 * process_Rank <= N)
        {
            MPI_Recv(
                scattered_Data,   // Address of the buffer we are receiving into.
                2,                // Number of elements that buffer can hold.
                MPI_DOUBLE,       // MPI type of the message we are receiving.
                0,                // Rank of the sending process.
                1,                // Message tag.
                MPI_COMM_WORLD,   // MPI communicator.
                MPI_STATUS_IGNORE // MPI status object.
            );
            printf("Process %d has received: ", process_Rank);

            /* Sum the received chunk and send the partial sum back to rank 0. */
            double sum = 0;
            for (int i = 0; i < 2; i++)
            {
                printf("%f ", scattered_Data[i]);
                sum += scattered_Data[i];
            }
            MPI_Send(
                &sum,          // Address of the partial sum we are sending back.
                1,             // Number of elements sent from that address.
                MPI_DOUBLE,    // MPI type of the message we are sending.
                0,             // Rank of the receiving process.
                1,             // Message tag.
                MPI_COMM_WORLD // MPI communicator.
            );
            printf("\n");
        }
    }
|
    MPI_Barrier(MPI_COMM_WORLD); // all ranks wait here before the aggregation step

    /* Process 0 aggregates the partial sums. */
    if (process_Rank == 0)
    {
        for (i = 1; i < size_Of_Comm; i++)
        {
            if (2 * i <= N) // only ranks that actually received a chunk send a sum back
            {
                double sum = 0;
                MPI_Recv(
                    &sum,             // Address of the buffer we are receiving into.
                    1,                // Number of elements that buffer can hold.
                    MPI_DOUBLE,       // MPI type of the message we are receiving.
                    i,                // Rank of the sending process.
                    1,                // Message tag.
                    MPI_COMM_WORLD,   // MPI communicator.
                    MPI_STATUS_IGNORE // MPI status object.
                );
                printf("Process %d has sent: %f \n", i, sum);
            }
        }
    }

    MPI_Finalize();
    return 0;
}
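
With Open MPI the listing above can be compiled and launched roughly as follows (the file name scatter_send.c is just a placeholder):

mpicc scatter_send.c -o scatter_send
mpirun -np 8 ./scatter_send

With 14 elements split into chunks of two, launching 8 processes means every worker rank 1 through 7 receives a chunk; with fewer processes some elements are simply never sent, and the guards on 2*i <= N keep the sends and receives matched.

For comparison, the same splitting and aggregation can also be written with MPI collectives. Below is a minimal sketch, not the original program: it assumes the job is started with exactly 7 ranks so the 14 elements divide evenly into chunks of two. Note that MPI_Scatter also hands the root rank a chunk, unlike the hand-rolled loop above, and MPI_Reduce replaces both the barrier and the manual gather loop.

#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int rank, size;
    double distro_Array[14] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14};
    double chunk[2];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (size != 7) // this sketch assumes exactly 14 / 2 = 7 ranks
    {
        if (rank == 0)
            printf("Run this sketch with exactly 7 ranks.\n");
        MPI_Finalize();
        return 1;
    }

    // Every rank, including rank 0, receives two consecutive elements.
    MPI_Scatter(distro_Array, 2, MPI_DOUBLE, chunk, 2, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    double partial = chunk[0] + chunk[1];
    printf("Process %d has received: %f %f (partial sum %f)\n", rank, chunk[0], chunk[1], partial);

    // Rank 0 collects the grand total of all partial sums.
    double total = 0;
    MPI_Reduce(&partial, &total, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0)
        printf("Total: %f\n", total);

    MPI_Finalize();
    return 0;
}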