-
-
Save ehamberg/1263868 to your computer and use it in GitHub Desktop.
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define SIZE 4

/*
 * MPI_Scatterv demo: the root process distributes a SIZE x SIZE character
 * matrix among all processes. Each process gets (SIZE*SIZE)/size elements;
 * the first `rem` ranks absorb one extra element each when SIZE*SIZE is not
 * evenly divisible by the number of processes.
 */
int main(int argc, char *argv[])
{
    int rank, size; /* this process' rank, and the number of processes */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int *sendcounts;              /* how many elements to send to each process */
    int *displs;                  /* displacement where each segment begins */
    int rem = (SIZE*SIZE) % size; /* elements remaining after even division */
    int sum = 0;                  /* running sum of counts, used for displacements */
    char rec_buf[100];            /* buffer where the received data is stored */

    /* the data to be distributed (only the root's copy is actually sent) */
    char data[SIZE][SIZE] = {
        {'a', 'b', 'c', 'd'},
        {'e', 'f', 'g', 'h'},
        {'i', 'j', 'k', 'l'},
        {'m', 'n', 'o', 'p'}
    };

    sendcounts = malloc(sizeof *sendcounts * size);
    displs = malloc(sizeof *displs * size);
    if (sendcounts == NULL || displs == NULL) {
        /* allocation failure: abort the whole job rather than deadlock in
         * the collective below */
        fprintf(stderr, "%d: malloc failed\n", rank);
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }

    /* calculate send counts and displacements; every rank computes the full
     * tables so each one knows its own receive count */
    for (int i = 0; i < size; i++) {
        sendcounts[i] = (SIZE*SIZE) / size;
        if (rem > 0) { /* spread the remainder over the lowest ranks */
            sendcounts[i]++;
            rem--;
        }
        displs[i] = sum;
        sum += sendcounts[i];
    }

    /* print calculated send counts and displacements (root only) */
    if (0 == rank) {
        for (int i = 0; i < size; i++) {
            printf("sendcounts[%d] = %d\tdispls[%d] = %d\n", i, sendcounts[i], i, displs[i]);
        }
    }

    /* divide the data among processes as described by sendcounts and displs;
     * the receive count must match what the root sends to this rank
     * (the MPI standard requires matching type signatures, not just a
     * large-enough buffer) */
    MPI_Scatterv(&data, sendcounts, displs, MPI_CHAR, &rec_buf, sendcounts[rank], MPI_CHAR, 0, MPI_COMM_WORLD);

    /* print what each process received */
    printf("%d: ", rank);
    for (int i = 0; i < sendcounts[rank]; i++) {
        printf("%c\t", rec_buf[i]);
    }
    printf("\n");

    free(sendcounts);
    free(displs);
    MPI_Finalize();
    return 0;
}
mpirun noticed that process rank 0 with PID 28921 on node sateesh-OptiPlex-9020 exited on signal 8 (Floating point exception)....??
can anyone help me
mpirun noticed that process rank 0 with PID 28921 on node sateesh-OptiPlex-9020 exited on signal 8 (Floating point exception)....??
can anyone help me
Me too. It has been 3 years and still no answer.
In the original revision, the uninitialized variable `size` was used to calculate the remainder before `MPI_Comm_size` had assigned it the number of processes. It can be fixed by moving the `MPI_Init`/`MPI_Comm_rank`/`MPI_Comm_size` calls to the beginning of main(), before any use of `size`.
For those who want to know what it prints out without compiling:
1: e f g h
sendcounts[0] = 4 displs[0] = 0
sendcounts[1] = 4 displs[1] = 4
sendcounts[2] = 4 displs[2] = 8
sendcounts[3] = 4 displs[3] = 12
0: a b c d
3: m n o p
2: i j k l
Great, thanks
LO DE ABAJO ES EL CODIGO CORRECTO:
.
.
.
.
.
.
.
.
.
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define N 4

/*
 * MPI_Scatterv example: the root scatters an N x N character matrix among
 * all processes. Each rank receives (N*N)/numProcs elements, and the first
 * `rem` ranks receive one extra element each when N*N is not evenly
 * divisible by the number of processes.
 */
int main(int argc, char *argv[])
{
    int rank, numProcs; /* this process' rank and the number of processes */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numProcs);

    int *sendcounts;                  /* how many elements to send to each process */
    int *displs;                      /* displacement where each segment begins */
    int rem = (N*N) % numProcs;       /* elements left over after even division */
    int sum = 0;                      /* running sum of counts, used for displacements */
    char rec_buf[100];                /* buffer where the received data is stored */

    /* the data to be distributed (only the root's copy is actually sent) */
    char data[N][N] = {
        {'a', 'b', 'c', 'd'},
        {'e', 'f', 'g', 'h'},
        {'i', 'j', 'k', 'l'},
        {'m', 'n', 'o', 'p'}
    };

    sendcounts = malloc(sizeof *sendcounts * numProcs);
    displs = malloc(sizeof *displs * numProcs);
    if (sendcounts == NULL || displs == NULL) {
        /* abort the whole job on allocation failure rather than deadlock
         * in the collective below */
        fprintf(stderr, "%d: malloc failed\n", rank);
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }

    /* calculate send counts and displacements; every rank computes the
     * full tables so each one knows its own receive count */
    for (int i = 0; i < numProcs; i++) {
        sendcounts[i] = (N*N) / numProcs;
        if (rem > 0) { /* spread the remainder over the lowest ranks */
            sendcounts[i]++;
            rem--;
        }
        displs[i] = sum;
        sum += sendcounts[i];
    }

    /* print calculated send counts and displacements (root only) */
    if (0 == rank) {
        for (int i = 0; i < numProcs; i++) {
            printf("sendcounts[%d] = %d\tdispls[%d] = %d\n", i, sendcounts[i], i, displs[i]);
        }
    }

    /* divide the data among processes as described by sendcounts and
     * displs; the receive count must match what the root sends to this
     * rank (the MPI standard requires matching type signatures) */
    MPI_Scatterv(&data, sendcounts, displs, MPI_CHAR, &rec_buf, sendcounts[rank], MPI_CHAR, 0, MPI_COMM_WORLD);

    /* print what each process received */
    printf("%d: ", rank);
    for (int i = 0; i < sendcounts[rank]; i++) {
        printf("%c\t", rec_buf[i]);
    }
    printf("\n");

    free(sendcounts);
    free(displs);
    MPI_Finalize();
    return 0;
}
Thanks a lot, very helpful ❤️
Thank you for the code, very helpful
Can I call MPI_Scatterv in only a subset of the processes?
For example, only in processes with rank <= 20?