How does the Nouveau driver allocate memory on the GPU side?

Problem description

I have been digging into the Nouveau GPU driver (the open-source driver for Nvidia GPUs) because I want to understand what actually goes on inside a driver. Specifically, I have been trying to figure out how the Nouveau code running on the CPU can allocate memory on the GPU side.
In other words, I want to learn how something like cudaMalloc() ends up allocating memory on the GPU. (I know cudaMalloc() is a CUDA API; I am just not sure what the equivalent of cudaMalloc() is in Nouveau.)
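On the userspace side, the closest analogue I have found is libdrm's nouveau_bo_new(), which as far as I can tell wraps the DRM_NOUVEAU_GEM_NEW ioctl. This is just a sketch of how I understand that path (exact flags and signatures may differ between libdrm versions):

/*
 * Rough sketch, not verified against a particular libdrm version:
 * allocate a buffer object placed in VRAM through libdrm_nouveau.
 * The heavy lifting happens in the kernel driver I am asking about.
 */
#include <fcntl.h>
#include <stdint.h>
#include <nouveau/nouveau.h>

int alloc_vram_bo(uint64_t size, struct nouveau_bo **pbo)
{
    struct nouveau_device *dev;
    int fd, ret;

    fd = open("/dev/dri/card0", O_RDWR);    /* DRM device node */
    if (fd < 0)
        return -1;

    ret = nouveau_device_wrap(fd, 1, &dev); /* takes ownership of fd */
    if (ret)
        return ret;

    /* Ask the kernel for a buffer object backed by VRAM. */
    return nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 0, size, NULL, pbo);
}

What I want to understand is what the kernel does underneath a call like this.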
Here is what I have found so far:

  1. Nouveau's TTM (Translation Table Manager) initializes a bo (buffer object) through int ttm_bo_init(...), whose definition can be found here and reads:
int ttm_bo_init(struct ttm_bo_device *bdev,
                struct ttm_buffer_object *bo,
                unsigned long size,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
                unsigned long buffer_start,
                bool interruptible,
                struct file *persistent_swap_storage,
                size_t acc_size,
                void (*destroy) (struct ttm_buffer_object *))
{
    int ret = 0;
    unsigned long num_pages;
    struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

    ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
    if (ret) {
        printk(KERN_ERR TTM_PFX "Out of kernel memory.\n");
        if (destroy)
            (*destroy)(bo);
        else
            kfree(bo);
        return -ENOMEM;
    }

    size += buffer_start & ~PAGE_MASK;
    num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    if (num_pages == 0) {
        printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
        if (destroy)
            (*destroy)(bo);
        else
            kfree(bo);
        return -EINVAL;
    }
    bo->destroy = destroy;

    kref_init(&bo->kref);
    kref_init(&bo->list_kref);
    atomic_set(&bo->cpu_writers, 0);
    atomic_set(&bo->reserved, 1);
    init_waitqueue_head(&bo->event_queue);
    INIT_LIST_HEAD(&bo->lru);
    INIT_LIST_HEAD(&bo->ddestroy);
    INIT_LIST_HEAD(&bo->swap);
    INIT_LIST_HEAD(&bo->io_reserve_lru);
    bo->bdev = bdev;
    bo->glob = bdev->glob;
    bo->type = type;
    bo->num_pages = num_pages;
    bo->mem.size = num_pages << PAGE_SHIFT;
    bo->mem.mem_type = TTM_PL_SYSTEM;
    bo->mem.num_pages = bo->num_pages;
    bo->mem.mm_node = NULL;
    bo->mem.page_alignment = page_alignment;
    bo->mem.bus.io_reserved_vm = false;
    bo->mem.bus.io_reserved_count = 0;
    bo->buffer_start = buffer_start & PAGE_MASK;
    bo->priv_flags = 0;
    bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
    bo->seq_valid = false;
    bo->persistent_swap_storage = persistent_swap_storage;
    bo->acc_size = acc_size;
    atomic_inc(&bo->glob->bo_count);

    ret = ttm_bo_check_placement(bo, placement);
    if (unlikely(ret != 0))
        goto out_err;

    /*
     * For ttm_bo_type_device buffers, allocate
     * address space from the device.
     */
    if (bo->type == ttm_bo_type_device) {
        ret = ttm_bo_setup_vm(bo);
        if (ret)
            goto out_err;
    }

    ret = ttm_bo_validate(bo, placement, interruptible, false);
    if (ret)
        goto out_err;

    ttm_bo_unreserve(bo);
    return 0;

out_err:
    ttm_bo_unreserve(bo);
    ttm_bo_unref(&bo);

    return ret;
}
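
To make sure I follow how this entry point is used, here is a stripped-down sketch of how a TTM-based driver of that era might call it (the mydrv_ names are made up; as far as I can tell the real caller in Nouveau is nouveau_bo_new() in nouveau_bo.c, which also picks the VRAM/GART placement flags):

#include <linux/slab.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/* Hypothetical driver code, only to illustrate the calling convention. */
static int mydrv_bo_create(struct ttm_bo_device *bdev, unsigned long size,
                           struct ttm_buffer_object **pbo)
{
    uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC; /* "put this in VRAM" */
    struct ttm_placement placement = {
        .num_placement      = 1,
        .placement          = &flags,
        .num_busy_placement = 1,
        .busy_placement     = &flags,
    };
    struct ttm_buffer_object *bo;
    int ret;

    bo = kzalloc(sizeof(*bo), GFP_KERNEL);
    if (!bo)
        return -ENOMEM;

    /*
     * ttm_bo_init() accounts the object, sets up the mmap offset via
     * ttm_bo_setup_vm() and then calls ttm_bo_validate() with the
     * requested placement. acc_size is simplified here; real drivers
     * compute it with a TTM accounting helper.
     */
    ret = ttm_bo_init(bdev, bo, size, ttm_bo_type_device, &placement,
                      0, 0, false, NULL, sizeof(*bo), NULL);
    if (ret)
        return ret; /* ttm_bo_init() already freed bo on failure */

    *pbo = bo;
    return 0;
}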

I think that, for a userspace application, the buffer gets allocated on the device (GPU) side by this part of the code, i.e. through ttm_bo_setup_vm():

    /*
     * For ttm_bo_type_device buffers, allocate
     * address space from the device.
     */
    if (bo->type == ttm_bo_type_device) {
        ret = ttm_bo_setup_vm(bo);
        if (ret)
            goto out_err;
    }

ttm_bo_setup_vm() is defined as:

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
    struct ttm_bo_device *bdev = bo->bdev;
    int ret;

retry_pre_get:
    ret = drm_mm_pre_get(&bdev->addr_space_mm);
    if (unlikely(ret != 0))
        return ret;

    write_lock(&bdev->vm_lock);
    bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm, bo->mem.num_pages, 0, 0);

    if (unlikely(bo->vm_node == NULL)) {
        ret = -ENOMEM;
        goto out_unlock;
    }

    bo->vm_node = drm_mm_get_block_atomic(bo->vm_node, bo->mem.num_pages, 0);

    if (unlikely(bo->vm_node == NULL)) {
        write_unlock(&bdev->vm_lock);
        goto retry_pre_get;
    }

    ttm_bo_vm_insert_rb(bo);
    write_unlock(&bdev->vm_lock);
    bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

    return 0;
out_unlock:
    write_unlock(&bdev->vm_lock);
    return ret;
}

But I am having trouble figuring out how the code between the lock and unlock actually allocates memory on the GPU side.
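As far as I can tell, the locked section just drives the old drm_mm allocator. This is the pattern in isolation, as I read it (my own sketch against the old drm_mm API, not actual driver code):

#include <drm/drm_mm.h>

/*
 * My reading of the drm_mm pattern used in ttm_bo_setup_vm(): it reserves
 * a range of pages in a manager-owned address space (bdev->addr_space_mm
 * above), i.e. it hands out offsets that later back the mmap mapping.
 */
static struct drm_mm_node *grab_range(struct drm_mm *mm, unsigned long num_pages)
{
    struct drm_mm_node *node;

    if (drm_mm_pre_get(mm))                 /* preallocate node bookkeeping */
        return NULL;

    /* Find a free hole of num_pages pages, no alignment, first fit. */
    node = drm_mm_search_free(mm, num_pages, 0, 0);
    if (!node)
        return NULL;

    /* Claim the hole without sleeping; may fail and force a retry. */
    return drm_mm_get_block_atomic(node, num_pages, 0);
}

If that reading is right, I still do not see where the actual video memory gets carved out, which is what I am really after.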
Also, I stumbled upon this question asked almost 10 years ago and was wondering whether the answer is still valid, i.e. whether memory on the GPU side can be allocated and freed with plain malloc() and free().

I know this question is somewhat vague, but I am not sure what other information I could provide to make it clearer. Any pointers or ideas would be greatly appreciated.
Thanks in advance!
