My program runs and then crashes at some point. After poring over the code, I've come to the conclusion that I don't know enough to figure out why. Can someone offer some help? Below is main(); I'd be happy to post the other source files if you ask, I just didn't want to post too much.
Thanks, Scott
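So that main() reads on its own, here are rough stand-ins for the constants, types, and helpers it calls. These are only placeholders to show the shapes involved (the ARRAYSIZE value is just an example, and my debug() is essentially a printf-style wrapper); the real definitions live in the other source files, which I can post if anyone wants them.

/* Placeholder declarations only -- the real versions are in my other source files. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define MASTER    0        /* rank of the master process                */
#define BEGIN     0        /* first index of the master array           */
#define ARRAYSIZE 1000     /* example value; length of masterArray      */

/* Begin/end indices of one slice of the master array */
typedef struct {
    int begin;
    int end;
} Range;

/* Helpers defined elsewhere (signatures approximate, inferred from the calls below) */
int   computeChunkSize(int arraySize, int nprocs);
int  *createRandomArray(void);
Range decomposeProblem(int begin, int end, int arraySize, int nprocs, int i);
void  showArray(int *array, int begin, int end);
void  reconstructArray(int **arrayOfArrays);
void  validateArray(int **arrayOfArrays);
void  bubble(int *array, int size);
#define debug printf       /* my debug() behaves like printf            */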
int main(int argc, char *argv[])
{
    //Global data goes here
    int rank, nprocs, i, j, k, rc, chunkSize;
    double start, finish, difference;
    MPI_Status status;
    int *masterArray;
    int *slaveArray;
    int *subArray;
    //Holder for subArrays for reassembly of subArrays
    int **arrayOfArrays;
    //Beginning and ending indices of the array
    Range range;

    //Begin execution
    //printf("%s", "Entering main()\n");
    MPI_Init(&argc, &argv); /* START MPI */

    /* DETERMINE RANK OF THIS PROCESSOR */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    //printf("My rank %d\n", rank);

    /* DETERMINE TOTAL NUMBER OF PROCESSORS */
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    //printf("Number of processes %d\n", nprocs);

    //Compute chunk size
    chunkSize = computeChunkSize(ARRAYSIZE, nprocs);
    //debug("%s: %d\n", "Chunk size", chunkSize);

    //Each slave gets N/#processes elements
    slaveArray = (int *)malloc(sizeof(int) * (chunkSize+1));
    //An array of int arrays (a pointer to pointers to ints)
    arrayOfArrays = (int **)malloc(sizeof(int *) * (nprocs-1));
    /****************************************************************
    ****************************************************************
    ************************ MASTER id == 0 ************************
    ****************************************************************
    ***************************************************************/
    /* MASTER: rank is 0. Problem decomposition: here it is simply a matter of
       splitting the master array evenly across the worker bees */
    if(rank == MASTER)
    {
        debug("%s", "Entering MASTER process\n");
        //Begin timing the runtime of this application
        start = MPI_Wtime();
        debug("%s: %lg\n", "Start time", start);
        //Seed the random number generator
        srand(time(NULL));
        //Create random array of ints for mpi processing
        masterArray = createRandomArray();
        debug("%s %d %s %d %s\n", "Master array of random integers from ", BEGIN, " to ", ARRAYSIZE-1, "\n");
        /* Create the subArray to be sent to the slaves; malloc returns a pointer
           to void, so explicitly coerce the pointer into the desired type with a cast */
        subArray = (int *)malloc(sizeof(int) * (chunkSize+1));
        //Initialize range
        range = (Range){.begin = 0, .end = (ARRAYSIZE/(nprocs-1))};
        debug("%s %d %s %d\n", "Range: ", range.begin, " to ", range.end);
        //Master decomposes the problem set: begin and end of each subArray sent to slaves
        for(i = 1; i < nprocs; i++)
        {
            //printf("%s", "Inside loop for Master send\n");
            range = decomposeProblem(range.begin, range.end, ARRAYSIZE, nprocs, i);
            debug("%s %d to %d%s", "Range from decomposition", range.begin, range.end, "\n");
            //Index for subArray
            k = 0;
            //Transfer the slice of the master array to the subArray
            for(j = range.begin; j < range.end; j++)
            {
                subArray[k] = masterArray[j];
                //printf("%d\t", subArray[k]);
                k++;
            }
            //printf("%s", "\n");
            //Show sub array contents
            debug("%s", "Showing subArray before master sends...\n");
            showArray(subArray, 0, k);
            //printf("%s %d%s", "Send to slave", i, " from master \n");
            debug("%s %d%s", "Send to slave", i, " from master \n");
            /***************************************************************
            ****************************************************************
            ************************ MASTER: SEND **************************
            ****************************************************************
            ***************************************************************/
            //MPI_Send(buffer,count,type,dest,tag,comm)
            rc = MPI_Send(&subArray, chunkSize, MPI_INT, i, 0, MPI_COMM_WORLD);
        }
        //Blocks until the slaves finish their work and start sending results back to master
        /* MPI_Recv is "blocking" in the sense that when the process (in this case
           my_rank == 0) reaches the MPI_Recv statement, it will wait until it
           actually receives the message (another process sends it). If the other process
           is not ready to Send, then the process running on my_rank == 0 will simply
           remain idle. If the message is never sent, my_rank == 0 will wait a very long time! */
        for(i = 1; i < nprocs; i++)
        {
            debug("%s %d%s ", "Receive from slave", i, " to master\n");
            /***************************************************************
            ****************************************************************
            ************************ MASTER: RECEIVE ***********************
            ****************************************************************
            ***************************************************************/
            debug("Rank %d approaching master MPI_Probe.\n", rank);
            // Probe for an incoming message from process zero
            MPI_Probe(rank, 0, MPI_COMM_WORLD, &status);
            debug("Rank %d going by MPI_Probe.\n", rank);
            // When probe returns, the status object has the size and other
            // attributes of the incoming message. Get the size of the message
            MPI_Get_count(&status, MPI_INT, &chunkSize);
            rc = MPI_Recv(&slaveArray, chunkSize, MPI_INT, i, 0, MPI_COMM_WORLD, &status);
            debug("Slave %d dynamically received %d numbers from 0.\n", rank, chunkSize);
            //Store subArray in 2D array
            debug("%s", "Storing subArray in 2DArray...\n");
            arrayOfArrays[i-1] = slaveArray;
        }
        //rebuild entire sorted array from sorted subarrays
        reconstructArray(arrayOfArrays);
        //starting with smallest value, validate that each element is <= next element
        validateArray(arrayOfArrays);
        //Finish timing the runtime of this application
        finish = MPI_Wtime();
        //Compute the runtime
        difference = finish-start;
        //Inform user
        debug("%s", "Exiting MASTER process\n");
        debug("%s %lg", "Time for completion:", difference);
    }
    /****************************************************************
    ****************************************************************
    ************************* End MASTER ***************************
    ****************************************************************
    ***************************************************************/
    /****************************************************************
    ****************************************************************
    ************************ SLAVE id > 0 **************************
    ****************************************************************
    ***************************************************************/
    else
    {
        debug("%s", "Entering SLAVE process\n");
        //by process id
        debug("%s %d%s", "Receive in slave", rank, " from master \n");
        debug("Rank %d approaching Slave MPI_Probe.\n", rank);
        // Probe for an incoming message from process zero
        MPI_Probe(MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status);
        debug("Rank %d going by Slave MPI_Probe.\n", rank);
        // When probe returns, the status object has the size and other
        // attributes of the incoming message. Get the size of the message
        MPI_Get_count(&status, MPI_INT, &chunkSize);
        debug("Count %d and chunkSize %d after Slave MPI_Get_count.\n", rank, chunkSize);
        /***************************************************************
        ***************************************************************
        ******************** SLAVE: RECEIVE ***************************
        ***************************************************************
        ***************************************************************/
        rc = MPI_Recv(&subArray, chunkSize, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);
        debug("%d dynamically received %d numbers from 0.\n", rank, chunkSize);
        /* Store the received subArray in the slaveArray for processing and sending back
           to master */
        slaveArray = subArray;
        //Take a look at the incoming subArray (size = N/#processes)
        debug("%s ", "Show the slaveArray contents in slave receive\n");
        debug("Before bubblesort: start %d, finish: %d\n", (rank-1) * chunkSize, rank * chunkSize);
        //showArray(slaveArray, (rank-1) * chunkSize, rank * chunkSize);
        //Running the actual sorting algorithm on the current slave's subArray
        //bubble(slaveArray, ARRAYSIZE);
        //Return sorted subArray back to the master by process id
        debug("%s %d%s", "Send from slave", i, " to master \n");
        /***************************************************************
        ****************************************************************
        ************************ SLAVE: SEND ***************************
        ****************************************************************
        ***************************************************************/
        //MPI_Send(buffer,count,type,dest,tag,comm)
        rc = MPI_Send(&slaveArray, chunkSize, MPI_INT, 0, 0, MPI_COMM_WORLD);
        debug("%s", "Exiting SLAVE process\n");
    }
    /****************************************************************
    ****************************************************************
    ************************* END SLAVE ****************************
    ****************************************************************
    ***************************************************************/
    //Clean up memory
    //free(subArray);
    //free(masterArray);
    //free(slaveArray);
    //free(arrayOfArrays);
    rc = MPI_Get_count(&status, MPI_INT, &chunkSize);
    debug("Process %d: received %d int(s) from process %d with tag %d \n", rank, chunkSize, status.MPI_SOURCE, status.MPI_TAG);

    /* EXIT MPI */
    MPI_Finalize();
    debug("%s", "Exiting main()\n");
    return 0;
}
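In case it helps to see the shape of what I'm going for, here is a stripped-down, stand-alone version of the round trip I'm trying to build: master sends a chunk to each worker, each worker probes the message, sizes a buffer with MPI_Get_count, receives, and sends the chunk back. It uses toy data instead of my Range/chunk machinery and does no sorting, so it isn't my real program, just the pattern.

/* Toy sketch of the send/probe/receive round trip, not my actual program. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
    int rank, nprocs;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    if (rank == 0) {
        /* Master: send a small chunk to every worker, then collect it back. */
        int chunk[4] = {3, 1, 4, 1};
        for (int i = 1; i < nprocs; i++)
            MPI_Send(chunk, 4, MPI_INT, i, 0, MPI_COMM_WORLD);

        for (int i = 1; i < nprocs; i++) {
            MPI_Status status;
            int count;
            int *buffer;

            /* Probe worker i, size the buffer from the status, then receive. */
            MPI_Probe(i, 0, MPI_COMM_WORLD, &status);
            MPI_Get_count(&status, MPI_INT, &count);
            buffer = malloc(sizeof(int) * count);
            MPI_Recv(buffer, count, MPI_INT, i, 0, MPI_COMM_WORLD, &status);
            printf("Master got %d ints back from worker %d\n", count, i);
            free(buffer);
        }
    } else {
        /* Worker: probe the master, size a buffer, receive, send it back. */
        MPI_Status status;
        int count;
        int *buffer;

        MPI_Probe(0, 0, MPI_COMM_WORLD, &status);
        MPI_Get_count(&status, MPI_INT, &count);
        buffer = malloc(sizeof(int) * count);
        MPI_Recv(buffer, count, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);

        /* (the sorting step would go here) */

        MPI_Send(buffer, count, MPI_INT, 0, 0, MPI_COMM_WORLD);
        free(buffer);
    }

    MPI_Finalize();
    return 0;
}

I compile with mpicc and launch with something like mpirun -np 4.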