
4a: Broadcast with point-to-point sends (my_bcast)

#include <stdio.h> 
#include <stdlib.h> 
#include <mpi.h> 
void my_bcast(void *data, int count, MPI_Datatype datatype, int root, MPI_Comm communicator)
{
    int world_rank;
    MPI_Comm_rank(communicator, &world_rank);
    int world_size;
    MPI_Comm_size(communicator, &world_size);

    if (world_rank == root)
    {
        // If we are the root process, send our data to everyone else
        int i;
        for (i = 0; i < world_size; i++)
        {
            if (i != world_rank)
            {
                MPI_Send(data, count, datatype, i, 0, communicator);
            }
        }
    }
    else
    {
        // If we are a receiver process, receive the data from the root
        MPI_Recv(data, count, datatype, root, 0, communicator, MPI_STATUS_IGNORE);
    }
}
int main(int argc, char **argv) 
{ 
    MPI_Init(NULL, NULL); 
    int world_rank; 
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); 
    int data; 
    if (world_rank == 0) 
    { 
        data = 101; 
        printf("Process 0 broadcasting data %d\n", data); 
        my_bcast(&data, 1, MPI_INT, 0, MPI_COMM_WORLD); 
    } 
    else 
    { 
        my_bcast(&data, 1, MPI_INT, 0, MPI_COMM_WORLD); 
        printf("Process %d received data %d from root process\n", world_rank, data); 
    } 
    MPI_Finalize();
    return 0;
}
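
The hand-rolled my_bcast mirrors the standard collective. As a sketch (not part of the assignment code), the same data movement could be done on every rank with a single library call:

MPI_Bcast(&data, 1, MPI_INT, 0, MPI_COMM_WORLD);

To try the program, compile with mpicc and launch several processes, e.g. mpirun -np 4 ./a.out.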

4b: Dot product with MPI_Reduce

#include <mpi.h> 
#include <stdio.h> 
#include <stdlib.h> 
/* Define length of dot product vectors */ 
#define VECLEN 10 
int main(int argc, char *argv[])
{ 
    int i, myid, numprocs, len = VECLEN; 
    double *a, *b; 
    double mysum, allsum; 
    /* MPI Initialization */ 
    MPI_Init(&argc, &argv); 
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs); 
    MPI_Comm_rank(MPI_COMM_WORLD, &myid); 
    /* Each MPI task performs the dot product, obtains its partial sum, and then calls MPI_Reduce to obtain the global sum. */ 
    if (myid == 0)
        printf("Starting omp_dotprod_mpi. Using %d tasks...\n", numprocs);
    /* Assign storage for dot product vectors */ 
    a = (double *)malloc(len * sizeof(double)); 
    b = (double *)malloc(len * sizeof(double)); 
    /* Initialize dot product vectors */ 
    for (i = 0; i < len; i++)
    {
        a[i] = 1.0; b[i] = a[i]; 
    } 
    /* Perform the dot product */ 
    mysum = 0.0; 
    for (i = 0; i < len; i++) 
    {
        mysum += a[i] * b[i]; 
    } 
    printf("Task %d partial sum = %f\n", myid, mysum); 
   /* After the dot product, perform a summation of results on each node */
    MPI_Reduce(&mysum, &allsum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); 
    if (myid == 0)
        printf("Done. MPI version: global sum = %f \n", allsum);
    free(a);
    free(b);
    MPI_Finalize();
    return 0;
}
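
Quick sanity check: every element of a and b is initialized to 1.0 and VECLEN is 10, so each task's partial sum is 10.0 and the reduced global sum should be 10.0 times the number of tasks (40.0 when run with four tasks).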

5a: Master-worker matrix multiplication

#include "mpi.h" 
#include <stdio.h> 
#include <stdlib.h> 
#define MATSIZE 10 
#define NRA MATSIZE /* number of rows in matrix A */ 
#define NCA MATSIZE /* number of columns in matrix A */ 
#define NCB MATSIZE /* number of columns in matrix B */
#define MASTER 0 /* taskid of first task */ 
#define FROM_MASTER 1 /* setting a message type */ 
#define FROM_WORKER 2 /* setting a message type */ 
int main(int argc, char *argv[]) 
{ 
    int numtasks, /* number of tasks in partition */ 
    taskid, /* a task identifier */ 
    numworkers, /* number of worker tasks */ 
    source, /* task id of message source */ 
    dest, /* task id of message destination */
    mtype, /* message type */ 
    rows, /* rows of matrix A sent to each worker */ 
    averow, extra, offset, /* used to determine rows sent to each worker */ 
    i, j, k; /* misc */
    double a[NRA][NCA], /* matrix A to be multiplied */ 
    b[NCA][NCB], /* matrix B to be multiplied */ 
    c[NRA][NCB]; /* result matrix C */ 
    MPI_Status status; 
    MPI_Init(&argc, &argv); 
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid); 
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks); 
    if (numtasks < 2)
    {
        printf("Need at least two MPI tasks. Quitting...\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
        exit(1);
    }
    numworkers = numtasks - 1; 
    /********** master task ************/ 
    if (taskid == MASTER) 
    { 
        printf("mpiMultiplication has started with %d tasks.\n", numtasks); 
        printf("Initializing arrays...\n"); 
        for (i = 0; i < NRA; i++)
            for (j = 0; j < NCA; j++)
                a[i][j] = i + j;
        for (i = 0; i < NCA; i++)
            for (j = 0; j < NCB; j++)
                b[i][j] = i * j;
        /* Measure start time */
        double start = MPI_Wtime();

        /* Send matrix data to the worker tasks */
        averow = NRA / numworkers;
        extra = NRA % numworkers;
        offset = 0;
        mtype = FROM_MASTER;
        for (dest = 1; dest <= numworkers; dest++)
        {
            rows = (dest <= extra) ? averow + 1 : averow;
            printf("Sending %d rows to task %d offset=%d\n", rows, dest, offset);
            MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
            MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
            MPI_Send(&a[offset][0], rows * NCA, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
            MPI_Send(&b, NCA * NCB, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
            offset = offset + rows;
        }

        /* Receive results from worker tasks */
        mtype = FROM_WORKER;
        for (i = 1; i <= numworkers; i++)
        {
            source = i;
            MPI_Recv(&offset, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
            MPI_Recv(&rows, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
            MPI_Recv(&c[offset][0], rows * NCB, MPI_DOUBLE, source, mtype, MPI_COMM_WORLD, &status);
            printf("Received results from task %d\n", source);
        }

        /* Print results */
        printf("******************\n");
        printf("Result Matrix:\n");
        for (i = 0; i < NRA; i++)
        {
            printf("\n");
            for (j = 0; j < NCB; j++)
                printf("%6.2f ", c[i][j]);
        }
        printf("\n*******************\n");

        /* Measure finish time */
        double finish = MPI_Wtime();
        printf("Done in %f seconds.\n", finish - start);
    }
    /********** worker task ************/ 
    if (taskid > MASTER) 
    { 
        mtype = FROM_MASTER; 
        MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status); 
        MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status); 
        MPI_Recv(&a, rows * NCA, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status); 
        MPI_Recv(&b, NCA * NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status); 
        for (k = 0; k < NCB; k++)
            for (i = 0; i < rows; i++)
            {
                c[i][k] = 0.0;
                for (j = 0; j < NCA; j++)
                    c[i][k] = c[i][k] + a[i][j] * b[j][k];
            }
        mtype = FROM_WORKER; 
        MPI_Send(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD); 
        MPI_Send(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD); 
        MPI_Send(&c, rows * NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD); 
    } 
    MPI_Finalize();
    return 0;
}
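
Hand check of the result: since a[i][j] = i + j and b[i][j] = i * j, each entry is c[i][j] = sum over k of (i + k) * (k * j) = j * (45 * i + 285) for MATSIZE 10, so for example c[1][1] = 330 and c[9][9] = 6210. Also note the guard at the top of main: the program needs at least two tasks (one master plus at least one worker), so launch it with something like mpirun -np 4.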

5b: Pi by numerical integration (MPI_Bcast + MPI_Reduce)

#include <mpi.h> 
#include <math.h> 
#include <stdio.h> 
int main(int argc, char *argv[])
{ 
    int done = 0, n, myid, numprocs, i; 
    double PI25DT = 3.141592653589793238462643; 
    double mypi, pi, h, sum, x; 
    MPI_Init(&argc, &argv); 
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs); 
    MPI_Comm_rank(MPI_COMM_WORLD, &myid); 
    while (!done)
    {
        if (myid == 0)
        {
            printf("Enter the number of intervals: (0 quits) ");
            scanf("%d", &n);
        }
        MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
        if (n == 0)
            break;
        h = 1.0 / (double)n;
        sum = 0.0;
        for (i = myid + 1; i <= n; i += numprocs)
        {
            x = h * ((double)i - 0.5);
            sum += 4.0 / (1.0 + x * x);
        }
        mypi = h * sum;
        MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
        if (myid == 0)
            printf("pi is approximately %.16f, Error is %.16f\n", pi, fabs(pi - PI25DT));
    }
    MPI_Finalize();
    return 0;
}
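
The loop is the midpoint rule for the integral of 4 / (1 + x*x) over [0, 1], which equals pi; each rank sums every numprocs-th interval and MPI_Reduce adds the partial sums on rank 0. Since fabs comes from math.h, add -lm when compiling if the linker asks for it.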

6: Parallel prime counting

#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define LIMIT     2500000     /* Increase this to find more primes */
#define FIRST     0           /* Rank of first task */

int isprime(int n) {
int i,squareroot;
if (n>10) {
   squareroot = (int) sqrt(n);
   for (i=3; i<=squareroot; i=i+2)
      if ((n%i)==0)
         return 0;
   return 1;
   }
/* Assume first four primes are counted elsewhere. Forget everything else */
else
   return 0;
}

int main (int argc, char *argv[])
{
int   ntasks,               /* total number of tasks in partition */
      rank,                 /* task identifier */
      n,                    /* loop variable */
      pc,                   /* prime counter */
      pcsum,                /* number of primes found by all tasks */
      foundone,             /* most recent prime found */
      maxprime,             /* largest prime found */
      mystart,              /* where to start calculating */
      stride;               /* calculate every nth number */

double start_time,end_time;
    
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Comm_size(MPI_COMM_WORLD,&ntasks);
if (((ntasks%2) != 0) || ((LIMIT%ntasks) != 0)) {
   printf("Sorry - this exercise requires an even number of tasks\n");
   printf("evenly divisible into %d.  Try 4 or 8.\n",LIMIT);
   MPI_Finalize();
   exit(0);
   }

start_time = MPI_Wtime();   /* Initialize start time */
mystart = (rank*2)+1;       /* Find my starting point - must be odd number */
stride = ntasks*2;          /* Determine stride, skipping even numbers */
pc=0;                       /* Initialize prime counter */
foundone = 0;               /* Initialize */

/******* task with rank 0 does this part *******/
if (rank == FIRST) {
   printf("Using %d tasks to scan %d numbers\n",ntasks,LIMIT);
   pc = 4;                  /* Assume first four primes are counted here */
   for (n=mystart; n<=LIMIT; n=n+stride) {
      if (isprime(n)) {
         pc++;
         foundone = n;
         /*** Optional: print each prime as it is found
         printf("%d\n",foundone);
         ***/
         }
      }
   MPI_Reduce(&pc,&pcsum,1,MPI_INT,MPI_SUM,FIRST,MPI_COMM_WORLD);
   MPI_Reduce(&foundone,&maxprime,1,MPI_INT,MPI_MAX,FIRST,MPI_COMM_WORLD);
   end_time=MPI_Wtime();
   printf("Done. Largest prime is %d Total primes %d\n",maxprime,pcsum);
   printf("Wallclock time elapsed: %.2lf seconds\n",end_time-start_time);
   }

/******* all other tasks do this part ********/
if (rank > FIRST) {
   for (n=mystart; n<=LIMIT; n=n+stride) {
      if (isprime(n)) {
         pc++;
         foundone = n;
         /*** Optional: print each prime as it is found
         printf("%d\n",foundone);
         ***/
         }
      }
   MPI_Reduce(&pc,&pcsum,1,MPI_INT,MPI_SUM,FIRST,MPI_COMM_WORLD);
   MPI_Reduce(&foundone,&maxprime,1,MPI_INT,MPI_MAX,FIRST,MPI_COMM_WORLD);
   }

MPI_Finalize();
return 0;
}
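
The guard near the top of main means the task count must be even and must divide LIMIT (2500000) exactly, so launches such as mpirun -np 4 or mpirun -np 8 satisfy both conditions. isprime uses sqrt from math.h, so add -lm when compiling if the linker asks for it.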