首页 > 解决方案 > 使用 MPI 进行并行合并排序

问题描述

我使用树结构方案在这段代码中实现了并行合并排序;但它不会对数组进行排序!

你能看看它并告诉我有什么问题吗?

对于处理器之间的通信,我使用了普通MPI_send()MPI_recv(). 但是,我使用数字012作为 . 的第五个参数的标签MPI_recv()

对于 8 个处理器,树结构方案将数组提供给具有等级的处理器,0然后将数组分成两半,将右半部分提供给处理器4并保留左半部分。然后处理器4将其数组分成两半,将右半部分提供给处理器6并保留左半部分。

在这个方案的最后,所有的处理器都在程序中工作,它们都不会空闲。因为在树的叶子上,所有的处理器都有一块 Array 可以对其进行顺序Merge_sort_inc处理。

文本在此处输入图像描述

#include<stdio.h>
#include<mpi.h>
#include<stdlib.h>
#include<time.h>
#include<math.h>

/* print_array() takes the  elements of Array to the output */

void print_array(int arr[], int size)
{
    for (int i = 0; i < size; i++)
        printf("%d ", arr[i]);
    printf("\n");
}

/*copyarray() takes as first argument an Array a[] which its elements 
between indexes start_a and end_a are to be copied to the dynamic Array *b with size of (size_b)  */
void copyarray(int a[] ,int start_a , int end_a, int* b, int size_b)
{
    int i = 0;
    for (i = 0; i < size_b;i++)
    {
        b[i] = a[start_a];
        start_a++;
        if (start_a == end_a)
            break;
    }
}
/* merge () function is just the sequential implementation of merge Sort Algorithm */
void merge(int Arr[], int left, int mid, int right)
{

    int n_l = (mid - left + 1);
    int n_r = (right - mid);
    int* Arr_l = (int*)calloc(n_l, sizeof(int));
    int* Arr_r = (int*)calloc(n_r, sizeof(int));
    if (Arr_l == NULL)
        return;
    if (Arr_r == NULL)
        return;

    for (int i = 0;i < n_l;i++)
        Arr_l[i] = Arr[left + i];

    for (int j = 0;j < n_r;j++)
        Arr_r[j] = Arr[mid + 1 + j];

    int i = 0, j = 0, k = left;
    while (i < n_l && j < n_r)
    {
        if (Arr_l[i] <= Arr_r[j])
        {
            Arr[k] = Arr_l[i];
            i++;
            k++;
        }
        else
        {
            Arr[k] = Arr_r[j];
            j++;
            k++;
        }
    }

    while (i < n_l)
    {
        Arr[k] = Arr_l[i];
        i++;
        k++;
    }

    while (j < n_r)
    {
        Arr[k] = Arr_r[j];
        j++;
        k++;
    }
    free(Arr_l);
    free(Arr_r);
}

/*merge_sort_inc() is the sequential algorithm of sorting in increasing order*/
void merge_sort_inc(int Arr[], int left, int right)
{
    int mid = (int)(left + (right - left) / 2);
    if (left < right)
    {
        merge_sort_inc(Arr, left, mid);
        merge_sort_inc(Arr, mid + 1, right - 1);
        merge(Arr, left, mid, right);
    }

}
/*parallelMerge() builds first the tree-structural communication between the processors. at the leafs of the tree,
  where there is no more divide and concurrent progress the Function gives the the processor the sequential Merge sort algorithm*/
void parallelMerge(int* array, int size, int height)
{
    int parent;
    int rank;
    int numberOfproc;
    int next;
    int rightChild;
    //MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numberOfproc);

    parent = rank & ~(1 << height);
    next = height - 1;
    rightChild = rank | (1 << (height - 1));

    if (height > 0)
        if (rightChild >= numberOfproc)
            parallelMerge(array, size, next);
        else
        {
            int left_size = (int)(size / 2);
            int right_size = size - left_size;
            int* leftArray = (int*)calloc(left_size, sizeof(int));
            int * rightArray = (int*)calloc(right_size,sizeof(int));
            if (leftArray == NULL)
                return;
            if (rightArray == NULL)
                return;

            int massage[2];
            int i, j , k;
            MPI_Status status;
            copyarray(array, 0, left_size, leftArray, left_size);
            copyarray(array, size - left_size, size, rightArray,right_size);
            massage[0] = next;
            massage[1] = right_size;
            MPI_Send(massage, 2, MPI_INT, rightChild,0, MPI_COMM_WORLD);
            MPI_Send(rightArray, right_size, MPI_INT, rightChild, 1, MPI_COMM_WORLD);

            parallelMerge(leftArray, left_size, next);
            MPI_Recv(rightArray, right_size, MPI_INT, rightChild, 2, MPI_COMM_WORLD, &status);


            i = j = k = 0;
            while (i < left_size && j < right_size)
            {
                if (leftArray[i] < rightArray[j])
                {
                    array[k] = leftArray[i]; i++, k++;
                }
                else
                {
                    array[k] = rightArray[j]; j++, k++;
                }
            }
            while (i<left_size)
            {
                array[k] = leftArray[i];
                k++;
                i++;
            }

            while (j<right_size)
            {
                array[k] = rightArray[j];
                k++;
                j++;
            }
        }
    else
    {
        merge_sort_inc(array, 0 ,size);
        if (parent != rank)
            MPI_Send(array, size, MPI_INT, parent, 2, MPI_COMM_WORLD);
    }

}


/////////////////////////////////////////////////////////////////////////////////////////////
int main()
{
    /*building an array with the help of Random function*/
    time_t t;
    srand((unsigned)time(&t));
    int Arr[100];
    int arrSize = sizeof(Arr) / sizeof(int);
    for (int i = 0; i < arrSize; i++)
        Arr[i] = rand() / 100;
    printf("the unsorted array is : \n ");
    print_array(Arr, arrSize);

    /*starting the parallel sorting*/ 
    int rank; 
    int comm_size;
    MPI_Init(NULL,NULL);
    MPI_Comm_rank(MPI_COMM_WORLD , &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
    double start = MPI_Wtime();//capture time
    if (rank == 0)
    {
        int roothight = 0; 
        int nodeCount = 1;
        while (nodeCount < comm_size)
            nodeCount++;

        roothight = (int)log(nodeCount);

        int* newarray = (int*)calloc(arrSize, sizeof(int));
        if (newarray == NULL)
            return 1;
        copyarray(Arr, 0, arrSize - 1, newarray, arrSize );

        
        parallelMerge(newarray, arrSize, roothight);
        double midle = MPI_Wtime();
    }
    else
    {
        int massage[2];
        int height;
        int size_array;
        MPI_Status status;
        MPI_Recv(massage, 2, MPI_INT, MPI_ANY_SOURCE,0, MPI_COMM_WORLD, &status);
        height = massage[0];
        size_array = massage[1];
        int* newarray = (int*)calloc(size_array, sizeof(int));
        if (newarray == NULL)
            return 1;
        MPI_Recv(newarray, size_array, MPI_INT, MPI_ANY_SOURCE,1, MPI_COMM_WORLD, &status);
        parallelMerge(newarray, size_array, height);
    }
    
    double end = MPI_Wtime();
    MPI_Finalize();
    printf("\n the sorted array is : \n");
    print_array(Arr, arrSize);
    printf("\n the sorting takes %lf time ", (end - start));
    return 0;
}

标签: cmpi

解决方案


推荐阅读