@Macorreag
Last active June 11, 2019 13:30
MPI with OpenMP to calculate Pi
# The hostfile for Open MPI
# The master node; 'slots=8' allows up to eight processes on this machine.
localhost slots=8
# The following slave nodes are single-processor machines (currently disabled):
#cluster-nodo1
#cluster-nodo2
//mpicc mpi-omp_pi.c -o mpi-omp_pi -lm -fopenmp
// mpirun -np 4 --hostfile mpi-hosts ./mpi-omp_pi
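// Note: the code below starts 4 OpenMP threads per MPI rank (num_threads(4)),
// so 'mpirun -np 4' launches 4 ranks x 4 threads = 16 workers in total.
// Adjust -np and the 'slots' value in the hostfile to match the cores
// actually available on your machine.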
#include <stdio.h>
#include <string.h>
#include <mpi.h>
#include <omp.h>
#include <math.h>
#define ITERATIONS 2e09
#define MAXTHREADS 32
/* Each worker sums its own chunk of the Leibniz series:
 *   pi = 4 * (1 - 1/3 + 1/5 - 1/7 + ...)
 * ID is the worker's global index, numprocs the total number of workers. */
int calculatePi(double *pi, int ID, int numprocs)
{
    long chunk = (long)(ITERATIONS / numprocs);
    long start = chunk * ID;        /* first series index for this worker */
    long end   = chunk * (ID + 1);  /* one past the last index            */
    long i = start;                 /* long avoids overflow in 2*i below  */
    *pi = 0.0;                      /* start from a clean partial sum     */
    do {
        *pi = *pi + 4.0 / (2.0 * i + 1.0);  /* even-indexed term: positive */
        i++;
        *pi = *pi - 4.0 / (2.0 * i + 1.0);  /* odd-indexed term: negative  */
        i++;
    } while (i < end);
    return 0;
}
int main(int argc, char *argv[])
{
    int processId, numprocs, i;
    double PI25DT = 3.141592653589793238462643;
    double local_pi[MAXTHREADS];           /* one partial sum per OpenMP thread */
    double process_pi = 0.0, global_pi = 0.0;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &processId);
    if (processId == 0) printf("\nLaunching with %i processes", numprocs);

    #pragma omp parallel num_threads(4)
    {
        int threadId = omp_get_thread_num();
        int threadsTotal = omp_get_num_threads();
        int globalId = (processId * threadsTotal) + threadId;
        calculatePi(&local_pi[threadId], globalId, threadsTotal * numprocs);
        #pragma omp barrier                /* all thread partial sums must be ready */
        #pragma omp single
        {
            for (i = 0; i < threadsTotal; i++)
                process_pi = process_pi + local_pi[i];
        }
        printf("%i ", globalId); fflush(stdout);
    }

    /* Combine the per-process sums on rank 0. */
    MPI_Reduce(&process_pi, &global_pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    if (processId == 0)
        printf("\npi is approximately %.16f, Error is %.16f\n",
               global_pi, fabs(global_pi - PI25DT));
    MPI_Finalize();
    return 0;
}
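The per-thread local_pi array, barrier, and single block above can also be avoided with an OpenMP reduction clause. A minimal sketch of that alternative, summing the same Leibniz series (the file name mpi-omp_pi_reduction.c is illustrative, not part of the gist):
// mpicc mpi-omp_pi_reduction.c -o mpi-omp_pi_reduction -lm -fopenmp
// mpirun -np 4 --hostfile mpi-hosts ./mpi-omp_pi_reduction
#include <stdio.h>
#include <math.h>
#include <mpi.h>
#include <omp.h>
#define ITERATIONS 2000000000L
int main(int argc, char *argv[])
{
    int processId, numprocs;
    double process_pi = 0.0, global_pi = 0.0;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &processId);
    /* Each rank handles a contiguous block of Leibniz terms; OpenMP splits
       the loop among threads and combines their sums via reduction. */
    long chunk = ITERATIONS / numprocs;
    long start = chunk * processId;
    long end   = chunk * (processId + 1);
    #pragma omp parallel for reduction(+:process_pi) num_threads(4)
    for (long k = start; k < end; k++)
        process_pi += (k % 2 == 0 ? 4.0 : -4.0) / (2.0 * k + 1.0);
    MPI_Reduce(&process_pi, &global_pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    if (processId == 0)
        printf("pi is approximately %.16f\n", global_pi);
    MPI_Finalize();
    return 0;
}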
//mpicc mpi_test2.c -o mpi_test2 -lm
//
#include <stdio.h>
#include <string.h>
#include <mpi.h>
#include <math.h>
#define MSG_LENGTH 15
int main(int argc, char *argv[])
{
    int i, tag = 1, tasks, iam;
    double x = 0.0;                        /* dummy accumulator for busy work */
    char message[MSG_LENGTH];
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int namelen;
    MPI_Status status;

    /* Initialize the message passing system, get the number of nodes,
       and find out which node we are. */
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &tasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &iam);

    if (iam == 0) {
        /* Root node sends the message to everyone else. */
        strcpy(message, " Hello, world!");
        for (i = 1; i < tasks; i++)
            MPI_Send(message, MSG_LENGTH, MPI_CHAR, i, tag, MPI_COMM_WORLD);
    } else {
        /* Receive the message from the root. */
        MPI_Recv(message, MSG_LENGTH, MPI_CHAR, 0, tag,
                 MPI_COMM_WORLD, &status);
        MPI_Get_processor_name(processor_name, &namelen);
        printf("\n-->%s", processor_name);
        /* Artificial CPU load so the non-root processes stay busy a while. */
        for (i = 0; i < 3e08; i++)
            x = x + sin(i);
    }
    printf(" node %d: %s \n", iam, message);

    /* Shut down the message passing system. */
    MPI_Finalize();
    return 0;
}
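The root's explicit send loop and the matching receives could also be expressed as one collective call. A minimal sketch using MPI_Bcast instead of MPI_Send/MPI_Recv (the file name mpi_bcast_hello.c is illustrative, not part of the gist):
// mpicc mpi_bcast_hello.c -o mpi_bcast_hello
// mpirun -np 4 --hostfile mpi-hosts ./mpi_bcast_hello
#include <stdio.h>
#include <string.h>
#include <mpi.h>
#define MSG_LENGTH 15
int main(int argc, char *argv[])
{
    int tasks, iam;
    char message[MSG_LENGTH];
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &tasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &iam);
    /* Only the root fills the buffer; MPI_Bcast copies it to every rank. */
    if (iam == 0)
        strcpy(message, " Hello, world!");
    MPI_Bcast(message, MSG_LENGTH, MPI_CHAR, 0, MPI_COMM_WORLD);
    printf(" node %d: %s \n", iam, message);
    MPI_Finalize();
    return 0;
}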