drm/amdgpu: add support for exporting VRAM using DMA-buf v3
We should be able to do this now after checking all the prerequisites.

v2: fix entry count in the sgt
v3: manually construct the sg

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Link: https://patchwork.freedesktop.org/patch/359295
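
Note: the exporter below only hands out VRAM when the importer has opted into peer-to-peer transfers; otherwise the buffer is still migrated to GTT as before. A minimal importer-side sketch, assuming the dma_buf_attach_ops.allow_peer2peer flag and the dma_buf_dynamic_attach() interface added earlier in this series; all my_* names are illustrative, not part of the patch:

/* Hypothetical importer-side sketch, not part of this patch. */
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>

static void my_move_notify(struct dma_buf_attachment *attach)
{
	/* Invalidate cached mappings here; the exporter is moving the BO. */
}

static const struct dma_buf_attach_ops my_attach_ops = {
	/* Allows the exporter to leave the BO in VRAM and map the BAR. */
	.allow_peer2peer = true,
	/* Mandatory for dynamic importers. */
	.move_notify = my_move_notify,
};

static struct sg_table *my_import(struct dma_buf *buf, struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_dynamic_attach(buf, dev, &my_attach_ops, NULL);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* Dynamic importers map with the reservation lock held. */
	dma_resv_lock(buf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(buf->resv);

	if (IS_ERR(sgt))
		dma_buf_detach(buf, attach);
	return sgt;
}

With attach->peer2peer set, amdgpu_dma_buf_map() below can return an sg_table carrying VRAM bus addresses instead of system-memory pages.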
Christian König committed Apr 1, 2020
1 parent 48262cd commit f44ffd6
Showing 3 changed files with 153 additions and 14 deletions.
56 changes: 43 additions & 13 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -276,14 +276,21 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 	struct dma_buf *dma_buf = attach->dmabuf;
 	struct drm_gem_object *obj = dma_buf->priv;
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct sg_table *sgt;
 	long r;
 
 	if (!bo->pin_count) {
-		/* move buffer into GTT */
+		/* move buffer into GTT or VRAM */
 		struct ttm_operation_ctx ctx = { false, false };
+		unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
 
-		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+		if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
+		    attach->peer2peer) {
+			bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+			domains |= AMDGPU_GEM_DOMAIN_VRAM;
+		}
+		amdgpu_bo_placement_from_domain(bo, domains);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		if (r)
 			return ERR_PTR(r);
@@ -293,20 +300,34 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 		return ERR_PTR(-EBUSY);
 	}
 
-	sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
-	if (IS_ERR(sgt))
-		return sgt;
-
-	if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
-			      DMA_ATTR_SKIP_CPU_SYNC))
-		goto error_free;
+	switch (bo->tbo.mem.mem_type) {
+	case TTM_PL_TT:
+		sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+					    bo->tbo.num_pages);
+		if (IS_ERR(sgt))
+			return sgt;
+
+		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+				      DMA_ATTR_SKIP_CPU_SYNC))
+			goto error_free;
+		break;
+
+	case TTM_PL_VRAM:
+		r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
+					      dir, &sgt);
+		if (r)
+			return ERR_PTR(r);
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
 
 	return sgt;
 
 error_free:
 	sg_free_table(sgt);
 	kfree(sgt);
-	return ERR_PTR(-ENOMEM);
+	return ERR_PTR(-EBUSY);
 }
 
 /**
@@ -322,9 +343,18 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
 				 struct sg_table *sgt,
 				 enum dma_data_direction dir)
 {
-	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
-	sg_free_table(sgt);
-	kfree(sgt);
+	struct dma_buf *dma_buf = attach->dmabuf;
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+	if (sgt->sgl->page_link) {
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+		sg_free_table(sgt);
+		kfree(sgt);
+	} else {
+		amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt);
+	}
 }
 
 /**
12 changes: 11 additions & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -24,8 +24,9 @@
 #ifndef __AMDGPU_TTM_H__
 #define __AMDGPU_TTM_H__
 
-#include "amdgpu.h"
+#include <linux/dma-direction.h>
 #include <drm/gpu_scheduler.h>
+#include "amdgpu.h"
 
 #define AMDGPU_PL_GDS		(TTM_PL_PRIV + 0)
 #define AMDGPU_PL_GWS		(TTM_PL_PRIV + 1)
@@ -74,6 +75,15 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+			      struct ttm_mem_reg *mem,
+			      struct device *dev,
+			      enum dma_data_direction dir,
+			      struct sg_table **sgt);
+void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
+			      struct device *dev,
+			      enum dma_data_direction dir,
+			      struct sg_table *sgt);
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
 
99 changes: 99 additions & 0 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -22,6 +22,7 @@
 * Authors: Christian König
 */

#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_atomfirmware.h"
@@ -458,6 +459,104 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
	mem->mm_node = NULL;
}

/**
 * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
 *
 * @adev: amdgpu device pointer
 * @mem: TTM memory object
 * @dev: device the sg table is mapped for (typically the DMA-buf importer)
 * @dir: dma direction
 * @sgt: resulting sg table
 *
 * Allocate and fill a sg table from a VRAM allocation.
 */
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
			      struct ttm_mem_reg *mem,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table **sgt)
{
	struct drm_mm_node *node;
	struct scatterlist *sg;
	int num_entries = 0;
	unsigned int pages;
	int i, r;

	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
	if (!*sgt)
		return -ENOMEM;

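	/* Determine the number of DRM_MM nodes to export */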
	for (pages = mem->num_pages, node = mem->mm_node;
	     pages; pages -= node->size, ++node)
		++num_entries;

	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
	if (r)
		goto error_free;

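	/*
	 * Start every entry with a zero length so the error path below can
	 * tell which entries have actually been mapped.
	 */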
	for_each_sg((*sgt)->sgl, sg, num_entries, i)
		sg->length = 0;

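	/*
	 * Walk the DRM_MM nodes and map the physical BAR address of each
	 * VRAM chunk into the peer device's address space.
	 */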
	node = mem->mm_node;
	for_each_sg((*sgt)->sgl, sg, num_entries, i) {
		phys_addr_t phys = (node->start << PAGE_SHIFT) +
			adev->gmc.aper_base;
		size_t size = node->size << PAGE_SHIFT;
		dma_addr_t addr;

		++node;
		addr = dma_map_resource(dev, phys, size, dir,
					DMA_ATTR_SKIP_CPU_SYNC);
		r = dma_mapping_error(dev, addr);
		if (r)
			goto error_unmap;

		sg_set_page(sg, NULL, size, 0);
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = size;
	}
	return 0;

error_unmap:
	for_each_sg((*sgt)->sgl, sg, num_entries, i) {
		if (!sg->length)
			continue;

		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}
	sg_free_table(*sgt);

error_free:
	kfree(*sgt);
	return r;
}

/**
 * amdgpu_vram_mgr_free_sgt - free and unmap a sg table
 *
 * @adev: amdgpu device pointer
 * @dev: device the sg table was mapped for
 * @dir: dma direction
 * @sgt: sg table to free
 *
 * Free a previously allocated sg table and unmap the DMA addresses it holds.
 */
void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
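
For reference, this is why amdgpu_dma_buf_unmap() above can branch on sgt->sgl->page_link: entries built by amdgpu_vram_mgr_alloc_sgt() are filled with sg_set_page(sg, NULL, size, 0) and carry only DMA addresses, while tables from drm_prime_pages_to_sg() reference real struct pages. A minimal sketch of that test, with a hypothetical helper name:

#include <linux/scatterlist.h>

/* Hypothetical helper, not part of the patch: distinguishes a VRAM-backed
 * sg_table (no struct page behind the entries) from a system-memory one. */
static inline bool sgt_is_vram_export(struct sg_table *sgt)
{
	return !sgt->sgl->page_link;
}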

/**
 * amdgpu_vram_mgr_usage - how many bytes are used in this domain
 *
