尝试运行 CusolverSSgels 测试用例,但它不起作用

问题描述

我正在研究最小二乘(LS)方法,之前手动实现了一个共轭梯度求解器。在更新我的 CUDA 版本后,我发现有一个新函数 (cusolverDnSSgels),我认为它应该比我的手动实现更快。我的第一步是在一个测试用例上运行它(见下文);根据 MATLAB 的计算,我期望的结果是:-6.5,9.7。不幸的是,我找不到自己哪里做错了,也找不到现成的示例,因为这是一个相对较新的函数

输出显示 niter= -3,根据文档,这表明迭代次数过多,但这没有意义,因为它是一个非常小的矩阵,应该很容易解决

#include <iostream>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "device_launch_parameters.h"


int main()
{   
    // NOTE(review): cudaGetDevice returns an error code, not the device id;
    // assigning the return value to `id` while also passing &id is dubious,
    // but harmless here. Kept as in the original post.
    int id = cudaGetDevice(&id);
    cusolverDnHandle_t cusolverH;
    cusolverStatus_t stat;

    // create the cuSOLVER dense handle
    stat = cusolverDnCreate(&cusolverH);

    // Problem size: C equations (rows), M unknowns (columns), 1 right-hand side.
    // A is C x M, Y is C x 1, X is M x 1; lda is the leading dimension of A.
    const int C = 3;
    const int M = 2;
    long lda = C;

    // host and device buffer pointers
    float *Amat,*Ymat,*Xmat;
    float *gAmat,*gYmat,*gXmat;

    // allocate host memory
    Amat = (float*)malloc(M * C * sizeof(float));
    Ymat = (float*)malloc(C * sizeof(float));
    Xmat = (float*)malloc(M * sizeof(float));

    srand(100);
#if 0
    // (disabled) random initialization of A and Y
    for (int i = 0; i < C * M; i++) {
        Amat[i] = rand() % 10 + 1;
        Amat[i] = (float)Amat[i];

    }

    for (int i = 0; i < C; i++) {
        Ymat[i] = rand() % 10 + 1;
        Ymat[i] = (float)Ymat[i];
    }
#endif
    // fixed test case; A is stored column-major (cuSOLVER/cuBLAS convention)
    Amat[0] = 6;
    Amat[1] = 7;
    Amat[2] = 6;
    Amat[3] = 5;
    Amat[4] = 5;
    Amat[5] = 5;
    Ymat[0] = 9;
    Ymat[1] = 3;
    Ymat[2] = 10;

    // allocate device memory
    cudaMalloc(&gAmat, M * C * sizeof(float));
    cudaMalloc(&gYmat, C * sizeof(float));
    cudaMalloc(&gXmat, M * 1 * sizeof(float));

    // copy the inputs to the device
    cudaMemcpy(gAmat, Amat, M * C * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(gYmat, Ymat, C * 1 * sizeof(float), cudaMemcpyHostToDevice);

    float *gdwork;
    size_t work_bytes;

    // query the workspace size (in bytes) for the iterative-refinement gels solver
    stat = cusolverDnSSgels_bufferSize(cusolverH, C, M, 1, gAmat, lda, gYmat, C, gXmat, M, NULL, &work_bytes);

    std::cout << "Status = " << stat << std::endl;

    int niter = 0;
    // BUG (the subject of this post): the documentation says dinfo must live in
    // *device* memory, but here it is a plain host int and &dinfo is passed below.
    int dinfo = 0;

    // NOTE: work_bytes is already a byte count, so multiplying by sizeof(float)
    // over-allocates by 4x (harmless, but unnecessary)
    cudaMalloc(&gdwork, work_bytes * sizeof(float));

    // solve the overdetermined system A*X = Y in the least-squares sense
    stat = cusolverDnSSgels(cusolverH, C, M, 1, gAmat, lda, gYmat, C, gXmat, M, gdwork, work_bytes, &niter, &dinfo);

    std::cout << "Status = " << stat  << std::endl;
    std::cout << "niter = "  << niter << std::endl;
    std::cout << "dinfo = "  << dinfo << std::endl;

    cudaDeviceSynchronize();

    // copy the solution vector back to the host
    cudaMemcpy(Xmat, gXmat, M * 1 * sizeof(float), cudaMemcpyDeviceToHost);


    // print the computed solution
    std::cout << Xmat[0] << "," << Xmat[1] << std::endl;

    // free memory
    cudaFree(gdwork);
    free(Amat);
    free(Ymat);
    free(Xmat);


    cudaFree(gXmat);
    cudaFree(gAmat);
    cudaFree(gYmat);

    // destroy handle
    cusolverDnDestroy(cusolverH);



    return 0;
}

我得到的结果是:

Status = 0
Status = 0
niter = -3
dinfo = 0
-4.31602e+08,-4.31602e+08

有人能指出我做错了什么吗?

解决方法

您的 dinfo 参数使用有问题。参考 documentation,我们看到:

cusolverDnSSgels() 函数的参数表(节选):

| 参数  | 内存 | 输入/输出 | 含义 |
|-------|------|-----------|------|
| dinfo | 设备 | 输出 | IRS 求解器返回的状态。若为 0,求解成功;若 dinfo = -i,则第 i 个参数无效。 |

dinfo 参数应位于设备内存中。但是您在主机内存中拥有它:

int dinfo = 0;

如果我将存储移动到正确的位置,您的代码会按预期输出您指示的值:

$ cat t143.cu
#include <iostream>
#include <cublas_v2.h>
#include <cusolverDn.h>


int main()
{
    // NOTE(review): cudaGetDevice returns an error code, not the device id;
    // kept as in the original answer, where it is harmless.
    int id = cudaGetDevice(&id);
    cusolverDnHandle_t cusolverH;
    cusolverStatus_t stat;

    // create the cuSOLVER dense handle
    stat = cusolverDnCreate(&cusolverH);

    // Problem size: C equations (rows), M unknowns (columns), 1 right-hand side.
    // A is C x M, Y is C x 1, X is M x 1; lda is the leading dimension of A.
    const int C = 3;
    const int M = 2;
    long lda = C;

    // host and device buffer pointers
    float *Amat,*Ymat,*Xmat;
    float *gAmat,*gYmat,*gXmat;

    // allocate host memory
    Amat = (float*)malloc(M * C * sizeof(float));
    Ymat = (float*)malloc(C * sizeof(float));
    Xmat = (float*)malloc(M * sizeof(float));

    srand(100);
#if 0
    // (disabled) random initialization of A and Y
    for (int i = 0; i < C * M; i++) {
        Amat[i] = rand() % 10 + 1;
        Amat[i] = (float)Amat[i];

    }

    for (int i = 0; i < C; i++) {
        Ymat[i] = rand() % 10 + 1;
        Ymat[i] = (float)Ymat[i];
    }
#endif
    // fixed test case; A is stored column-major (cuSOLVER/cuBLAS convention)
    Amat[0] = 6;
    Amat[1] = 7;
    Amat[2] = 6;
    Amat[3] = 5;
    Amat[4] = 5;
    Amat[5] = 5;
    Ymat[0] = 9;
    Ymat[1] = 3;
    Ymat[2] = 10;

    // allocate device memory
    cudaMalloc(&gAmat, M * C * sizeof(float));
    cudaMalloc(&gYmat, C * sizeof(float));
    cudaMalloc(&gXmat, M * 1 * sizeof(float));

    // copy the inputs to the device
    cudaMemcpy(gAmat, Amat, M * C * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(gYmat, Ymat, C * 1 * sizeof(float), cudaMemcpyHostToDevice);

    float *gdwork;
    size_t work_bytes;

    // query the workspace size (in bytes) for the iterative-refinement gels solver
    stat = cusolverDnSSgels_bufferSize(cusolverH, C, M, 1, gAmat, lda, gYmat, C, gXmat, M, NULL, &work_bytes);

    std::cout << "Status = " << stat << std::endl;

    int niter = 0;
    // THE FIX: dinfo must be a pointer to *device* memory (see the parameter
    // table in the cuSOLVER docs); hinfo is its host-side mirror for printing.
    int *dinfo, hinfo;

    // NOTE: work_bytes is already a byte count, so multiplying by sizeof(float)
    // over-allocates by 4x (harmless, kept as in the original answer)
    cudaMalloc(&gdwork, work_bytes * sizeof(float));
    cudaMalloc(&dinfo, sizeof(int));

    // solve the overdetermined system A*X = Y in the least-squares sense,
    // passing the device-resident dinfo this time
    stat = cusolverDnSSgels(cusolverH, C, M, 1, gAmat, lda, gYmat, C, gXmat, M, gdwork, work_bytes, &niter, dinfo);
    // bring the solver status back to the host before printing
    cudaMemcpy(&hinfo, dinfo, sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << "Status = " << stat  << std::endl;
    std::cout << "niter = "  << niter << std::endl;
    std::cout << "dinfo = "  << hinfo << std::endl;

    cudaDeviceSynchronize();

    // copy the solution vector back to the host
    cudaMemcpy(Xmat, gXmat, M * 1 * sizeof(float), cudaMemcpyDeviceToHost);


    // print the computed solution (expected: -6.5,9.7)
    std::cout << Xmat[0] << "," << Xmat[1] << std::endl;

    // free memory
    cudaFree(gdwork);
    cudaFree(dinfo);
    free(Amat);
    free(Ymat);
    free(Xmat);


    cudaFree(gXmat);
    cudaFree(gAmat);
    cudaFree(gYmat);

    // destroy handle
    cusolverDnDestroy(cusolverH);



    return 0;
}
$ nvcc -o t143 t143.cu -lcublas -lcusolver
$ cuda-memcheck ./t143
========= CUDA-MEMCHECK
Status = 0
Status = 0
niter = -51
dinfo = 0
-6.5,9.7
========= ERROR SUMMARY: 0 errors
$

注意事项:

  • 我使用的是 CUDA 11.3。如果您使用的是更早的版本,我强烈建议您先升级到 CUDA 11.3 或更新版本,再使用此功能。

  • 使用 cuda-memcheck 运行代码,可以获得有关问题所在的提示。

  • 通过对照文档中给出的参数位置表(主机/设备)逐一检查各参数的用法,可以很快定位这个问题。您之前在 here 也遇到过类似的问题。今后养成检查这张表的习惯,可以为您节省时间。