drm/v3d: Use the new shmem helpers to reduce driver boilerplate.
The new shmem helpers from Noralf and Rob abstract out a bunch of our
BO creation and mapping code.

v2: Use the new sgt getter, and flag pages as dirty before freeing.
v3: Remove the mismatched put_pages.

Signed-off-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20190314163451.13431-1-eric@anholt.net
Reviewed-by: Rob Herring <robh@kernel.org> (v2)
Eric Anholt committed Mar 14, 2019
1 parent 2194a63 commit 40609d4
Showing 7 changed files with 115 additions and 269 deletions.
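In outline, the pattern the commit adopts is to embed struct drm_gem_shmem_object in the driver's BO and route the generic GEM object operations to the shmem helpers, keeping only the V3D-specific GPU-MMU bookkeeping in the driver. Below is a minimal sketch of that wiring, assuming the shmem-helper API of this kernel generation; the foo_* names are hypothetical stand-ins, while the drm_gem_shmem_* entry points are the ones this patch uses (compare v3d_gem_funcs in v3d_bo.c below):

#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>

/* Hypothetical driver BO wrapping the shmem GEM object; the helpers
 * recover the shmem object from the embedded drm_gem_object via
 * container_of, so the driver is free to hang its own state (here, a
 * GPU-VA node) off the same struct. */
struct foo_bo {
	struct drm_gem_shmem_object base;
	struct drm_mm_node node;
};

static inline struct foo_bo *to_foo_bo(struct drm_gem_object *obj)
{
	return container_of(obj, struct foo_bo, base.base);
}

/* The generic object ops all delegate to the shmem helpers; a driver
 * with extra teardown (like v3d) points .free at its own function. */
static const struct drm_gem_object_funcs foo_gem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

With this in place, the page pinning, sg-table construction, mmap, and fault handling that v3d previously hand-rolled (v3d_bo_get_pages(), v3d_gem_fault(), v3d_mmap(), and friends in the diff below) all disappear into the helper library.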
1 change: 1 addition & 0 deletions drivers/gpu/drm/v3d/Kconfig
@@ -5,6 +5,7 @@ config DRM_V3D
 	depends on COMMON_CLK
 	depends on MMU
 	select DRM_SCHED
+	select DRM_GEM_SHMEM_HELPER
 	help
 	  Choose this option if you have a system that has a Broadcom
 	  V3D 3.x or newer GPU, such as BCM7268.
311 changes: 90 additions & 221 deletions drivers/gpu/drm/v3d/v3d_bo.c
@@ -25,158 +25,6 @@
#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"

/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps
* it for DMA.
*/
static int
v3d_bo_get_pages(struct v3d_bo *bo)
{
struct drm_gem_object *obj = &bo->base;
struct drm_device *dev = obj->dev;
int npages = obj->size >> PAGE_SHIFT;
int ret = 0;

mutex_lock(&bo->lock);
if (bo->pages_refcount++ != 0)
goto unlock;

if (!obj->import_attach) {
bo->pages = drm_gem_get_pages(obj);
if (IS_ERR(bo->pages)) {
ret = PTR_ERR(bo->pages);
goto unlock;
}

bo->sgt = drm_prime_pages_to_sg(bo->pages, npages);
if (IS_ERR(bo->sgt)) {
ret = PTR_ERR(bo->sgt);
goto put_pages;
}

/* Map the pages for use by the GPU. */
dma_map_sg(dev->dev, bo->sgt->sgl,
bo->sgt->nents, DMA_BIDIRECTIONAL);
} else {
bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
if (!bo->pages)
goto put_pages;

drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages,
NULL, npages);

/* Note that dma-bufs come in mapped. */
}

mutex_unlock(&bo->lock);

return 0;

put_pages:
drm_gem_put_pages(obj, bo->pages, true, true);
bo->pages = NULL;
unlock:
bo->pages_refcount--;
mutex_unlock(&bo->lock);
return ret;
}

static void
v3d_bo_put_pages(struct v3d_bo *bo)
{
struct drm_gem_object *obj = &bo->base;

mutex_lock(&bo->lock);
if (--bo->pages_refcount == 0) {
if (!obj->import_attach) {
dma_unmap_sg(obj->dev->dev, bo->sgt->sgl,
bo->sgt->nents, DMA_BIDIRECTIONAL);
sg_free_table(bo->sgt);
kfree(bo->sgt);
drm_gem_put_pages(obj, bo->pages, true, true);
} else {
kfree(bo->pages);
}
}
mutex_unlock(&bo->lock);
}

static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev,
size_t unaligned_size)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct drm_gem_object *obj;
struct v3d_bo *bo;
size_t size = roundup(unaligned_size, PAGE_SIZE);
int ret;

if (size == 0)
return ERR_PTR(-EINVAL);

bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return ERR_PTR(-ENOMEM);
obj = &bo->base;

INIT_LIST_HEAD(&bo->unref_head);
mutex_init(&bo->lock);

ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto free_bo;

spin_lock(&v3d->mm_lock);
ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
obj->size >> PAGE_SHIFT,
GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
spin_unlock(&v3d->mm_lock);
if (ret)
goto free_obj;

return bo;

free_obj:
drm_gem_object_release(obj);
free_bo:
kfree(bo);
return ERR_PTR(ret);
}

struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
size_t unaligned_size)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct drm_gem_object *obj;
struct v3d_bo *bo;
int ret;

bo = v3d_bo_create_struct(dev, unaligned_size);
if (IS_ERR(bo))
return bo;
obj = &bo->base;

ret = v3d_bo_get_pages(bo);
if (ret)
goto free_mm;

v3d_mmu_insert_ptes(bo);

mutex_lock(&v3d->bo_lock);
v3d->bo_stats.num_allocated++;
v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
mutex_unlock(&v3d->bo_lock);

return bo;

free_mm:
spin_lock(&v3d->mm_lock);
drm_mm_remove_node(&bo->node);
spin_unlock(&v3d->mm_lock);

drm_gem_object_release(obj);
kfree(bo);
return ERR_PTR(ret);
}

/* Called DRM core on the last userspace/kernel unreference of the
* BO.
*/
@@ -185,111 +33,135 @@ void v3d_free_object(struct drm_gem_object *obj)
 	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
 	struct v3d_bo *bo = to_v3d_bo(obj);
 
+	v3d_mmu_remove_ptes(bo);
+
 	mutex_lock(&v3d->bo_lock);
 	v3d->bo_stats.num_allocated--;
 	v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
 	mutex_unlock(&v3d->bo_lock);
 
-	v3d_bo_put_pages(bo);
-
-	if (obj->import_attach)
-		drm_prime_gem_destroy(obj, bo->sgt);
-
-	v3d_mmu_remove_ptes(bo);
 	spin_lock(&v3d->mm_lock);
 	drm_mm_remove_node(&bo->node);
 	spin_unlock(&v3d->mm_lock);
 
-	mutex_destroy(&bo->lock);
+	/* GPU execution may have dirtied any pages in the BO. */
+	bo->base.pages_mark_dirty_on_put = true;
 
-	drm_gem_object_release(obj);
-	kfree(bo);
+	drm_gem_shmem_free_object(obj);
 }
 
-static void
-v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
+static const struct drm_gem_object_funcs v3d_gem_funcs = {
+	.free = v3d_free_object,
+	.print_info = drm_gem_shmem_print_info,
+	.pin = drm_gem_shmem_pin,
+	.unpin = drm_gem_shmem_unpin,
+	.get_sg_table = drm_gem_shmem_get_sg_table,
+	.vmap = drm_gem_shmem_vmap,
+	.vunmap = drm_gem_shmem_vunmap,
+	.vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/* gem_create_object function for allocating a BO struct and doing
+ * early setup.
+ */
+struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
 {
-	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_flags |= VM_MIXEDMAP;
-	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-}
+	struct v3d_bo *bo;
+	struct drm_gem_object *obj;
 
-vm_fault_t v3d_gem_fault(struct vm_fault *vmf)
-{
-	struct vm_area_struct *vma = vmf->vma;
-	struct drm_gem_object *obj = vma->vm_private_data;
-	struct v3d_bo *bo = to_v3d_bo(obj);
-	pfn_t pfn;
-	pgoff_t pgoff;
+	if (size == 0)
+		return NULL;
 
-	/* We don't use vmf->pgoff since that has the fake offset: */
-	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-	pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
+	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+	if (!bo)
+		return NULL;
+	obj = &bo->base.base;
 
-	return vmf_insert_mixed(vma, vmf->address, pfn);
+	obj->funcs = &v3d_gem_funcs;
+
+	INIT_LIST_HEAD(&bo->unref_head);
+
+	return &bo->base.base;
 }
 
-int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
+static int
+v3d_bo_create_finish(struct drm_gem_object *obj)
 {
+	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
+	struct v3d_bo *bo = to_v3d_bo(obj);
+	struct sg_table *sgt;
 	int ret;
 
-	ret = drm_gem_mmap(filp, vma);
+	/* So far we pin the BO in the MMU for its lifetime, so use
+	 * shmem's helper for getting a lifetime sgt.
+	 */
+	sgt = drm_gem_shmem_get_pages_sgt(&bo->base.base);
+	if (IS_ERR(sgt))
+		return PTR_ERR(sgt);
+
+	spin_lock(&v3d->mm_lock);
+	/* Allocate the object's space in the GPU's page tables.
+	 * Inserting PTEs will happen later, but the offset is for the
+	 * lifetime of the BO.
+	 */
+	ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
+					 obj->size >> PAGE_SHIFT,
+					 GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
+	spin_unlock(&v3d->mm_lock);
 	if (ret)
 		return ret;
 
-	v3d_set_mmap_vma_flags(vma);
+	/* Track stats for /debug/dri/n/bo_stats. */
+	mutex_lock(&v3d->bo_lock);
+	v3d->bo_stats.num_allocated++;
+	v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
+	mutex_unlock(&v3d->bo_lock);
 
-	return ret;
+	v3d_mmu_insert_ptes(bo);
+
+	return 0;
 }
 
-int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
+			     size_t unaligned_size)
 {
+	struct drm_gem_shmem_object *shmem_obj;
+	struct v3d_bo *bo;
 	int ret;
 
-	ret = drm_gem_mmap_obj(obj, obj->size, vma);
-	if (ret < 0)
-		return ret;
-
-	v3d_set_mmap_vma_flags(vma);
+	shmem_obj = drm_gem_shmem_create(dev, unaligned_size);
+	if (!shmem_obj)
+		return NULL;
+	bo = to_v3d_bo(&shmem_obj->base);
 
-	return 0;
-}
+	ret = v3d_bo_create_finish(&shmem_obj->base);
+	if (ret)
+		goto free_obj;
 
-struct sg_table *
-v3d_prime_get_sg_table(struct drm_gem_object *obj)
-{
-	struct v3d_bo *bo = to_v3d_bo(obj);
-	int npages = obj->size >> PAGE_SHIFT;
+	return bo;
 
-	return drm_prime_pages_to_sg(bo->pages, npages);
+free_obj:
+	drm_gem_shmem_free_object(&shmem_obj->base);
+	return ERR_PTR(ret);
 }
 
 struct drm_gem_object *
 v3d_prime_import_sg_table(struct drm_device *dev,
 			  struct dma_buf_attachment *attach,
 			  struct sg_table *sgt)
 {
-	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct drm_gem_object *obj;
-	struct v3d_bo *bo;
-
-	bo = v3d_bo_create_struct(dev, attach->dmabuf->size);
-	if (IS_ERR(bo))
-		return ERR_CAST(bo);
-	obj = &bo->base;
-
-	obj->resv = attach->dmabuf->resv;
-
-	bo->sgt = sgt;
-	obj->import_attach = attach;
-	v3d_bo_get_pages(bo);
+	int ret;
 
-	mutex_lock(&v3d->bo_lock);
-	v3d->bo_stats.num_allocated++;
-	v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
-	mutex_unlock(&v3d->bo_lock);
+	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+	if (IS_ERR(obj))
+		return obj;
 
-	v3d_mmu_insert_ptes(bo);
+	ret = v3d_bo_create_finish(obj);
+	if (ret) {
+		drm_gem_shmem_free_object(obj);
+		return ERR_PTR(ret);
+	}
 
 	return obj;
 }
@@ -312,8 +184,8 @@ int v3d_create_bo_ioctl(struct drm_device *dev, void *data,

 	args->offset = bo->node.start << PAGE_SHIFT;
 
-	ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
-	drm_gem_object_put_unlocked(&bo->base);
+	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
+	drm_gem_object_put_unlocked(&bo->base.base);
 
 	return ret;
 }
@@ -323,7 +195,6 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_v3d_mmap_bo *args = data;
 	struct drm_gem_object *gem_obj;
-	int ret;
 
 	if (args->flags != 0) {
 		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
@@ -336,12 +207,10 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 
-	ret = drm_gem_create_mmap_offset(gem_obj);
-	if (ret == 0)
-		args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
 	drm_gem_object_put_unlocked(gem_obj);
 
-	return ret;
+	return 0;
 }
 
 int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
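Taken together, the new allocation path above reduces to: shmem create, pin pages and build the sg table, carve out GPU-VA, write PTEs. A condensed, hypothetical rendering of that flow (foo_create_bo is not in the patch; the calls and the error-check style, including the NULL check on drm_gem_shmem_create(), mirror the diff):

static int foo_create_bo(struct drm_device *dev, size_t size, u32 *offset_out)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct drm_gem_shmem_object *shmem_obj;
	struct v3d_bo *bo;
	struct sg_table *sgt;
	int ret;

	/* 1. The shmem helper allocates the GEM object (through the
	 * driver's v3d_create_object() hook) and its shmem backing. */
	shmem_obj = drm_gem_shmem_create(dev, size);
	if (!shmem_obj)
		return -ENOMEM;
	bo = to_v3d_bo(&shmem_obj->base);

	/* 2. Pin the pages for the BO's lifetime and get the sg table
	 * that the GPU MMU code will map. */
	sgt = drm_gem_shmem_get_pages_sgt(&shmem_obj->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto free;
	}

	/* 3. Carve out GPU virtual-address space and insert the PTEs. */
	spin_lock(&v3d->mm_lock);
	ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
					 shmem_obj->base.size >> PAGE_SHIFT,
					 GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
	spin_unlock(&v3d->mm_lock);
	if (ret)
		goto free;
	v3d_mmu_insert_ptes(bo);

	/* Userspace addresses the BO at this GPU-VA offset (what
	 * v3d_create_bo_ioctl() returns in args->offset). */
	*offset_out = bo->node.start << PAGE_SHIFT;
	return 0;

free:
	drm_gem_shmem_free_object(&shmem_obj->base);
	return ret;
}

Teardown runs the same steps in reverse: v3d_free_object() removes the PTEs and the drm_mm node, sets pages_mark_dirty_on_put (GPU writes bypass the CPU page tables, so the dirty bit must be forced), and lets drm_gem_shmem_free_object() unpin and release the pages.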
