drm/exynos: add iommu support for exynos drm framework
Changelog v4:
- fix condition for drm_iommu_detach_device function.

Changelog v3:
- set dma_parms->max_segment_size of drm_device->dev.
- use devm_kzalloc instead of kzalloc.

Changelog v2:
- fix iommu attach condition.
  . check archdata.dma_ops of the drm device instead of the
    subdrv device's.
- code clean to exynos_drm_iommu.c file.
  . remove '#ifdef CONFIG_ARM_DMA_USE_IOMMU' from exynos_drm_iommu.c
    and add it to drivers/gpu/drm/exynos/Kconfig.

Changelog v1:
This patch adds iommu support for the exynos drm framework with the dma
mapping api. The dma mapping api is now used to allocate physical memory
and map it through the iommu page table; some existing code is removed
and some new code is added for iommu support.

GEM allocation requires a single device object to use the dma mapping api,
so this patch uses one iommu mapping for all sub drivers. In other words,
all sub drivers share the same iommu mapping; a simplified sketch of this
shared-mapping idea follows below.
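
As a rough illustration of that shared mapping (a sketch only, not code
from this patch: the example_* names, the address range, and the exact
arm_iommu_* usage are assumptions based on the ARM DMA-IOMMU helpers of
this kernel generation), the drm device owns one mapping and every sub
driver device attaches to it:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/err.h>
#include <asm/dma-iommu.h>
#include <drm/drmP.h>

static int example_create_iommu_mapping(struct drm_device *drm_dev)
{
	struct dma_iommu_mapping *mapping;

	/* arbitrary example IOVA range; the driver chooses its own */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x10000000, SZ_256M, 4);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* remember the mapping on the top-level drm device */
	drm_dev->dev->archdata.mapping = mapping;
	return 0;
}

static int example_iommu_attach_device(struct drm_device *drm_dev,
				       struct device *subdrv_dev)
{
	/* every sub driver reuses the mapping owned by drm_dev, so all
	 * of them share a single iommu address space */
	return arm_iommu_attach_device(subdrv_dev,
				       drm_dev->dev->archdata.mapping);
}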

Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Inki Dae authored and Inki Dae committed Nov 29, 2012
1 parent 549a17e commit 0519f9a
Showing 11 changed files with 409 additions and 305 deletions.
6 changes: 6 additions & 0 deletions drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.

config DRM_EXYNOS_IOMMU
bool "EXYNOS DRM IOMMU Support"
depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
help
Choose this option if you want to use IOMMU feature for DRM.

config DRM_EXYNOS_DMABUF
bool "EXYNOS DRM DMABUF"
depends on DRM_EXYNOS
1 change: 1 addition & 0 deletions drivers/gpu/drm/exynos/Makefile
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o

exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
88 changes: 32 additions & 56 deletions drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -33,71 +33,58 @@
static int lowlevel_buffer_allocate(struct drm_device *dev,
unsigned int flags, struct exynos_drm_gem_buf *buf)
{
dma_addr_t start_addr;
int ret = 0;
unsigned int npages, i = 0;
struct scatterlist *sgl;
int ret = 0;
enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS;

DRM_DEBUG_KMS("%s\n", __FILE__);

if (IS_NONCONTIG_BUFFER(flags)) {
DRM_DEBUG_KMS("not support allocation type.\n");
return -EINVAL;
}

if (buf->dma_addr) {
DRM_DEBUG_KMS("already allocated.\n");
return 0;
}

if (buf->size >= SZ_1M) {
npages = buf->size >> SECTION_SHIFT;
buf->page_size = SECTION_SIZE;
} else if (buf->size >= SZ_64K) {
npages = buf->size >> 16;
buf->page_size = SZ_64K;
} else {
npages = buf->size >> PAGE_SHIFT;
buf->page_size = PAGE_SIZE;
init_dma_attrs(&buf->dma_attrs);

if (flags & EXYNOS_BO_NONCONTIG)
attr = DMA_ATTR_WRITE_COMBINE;

dma_set_attr(attr, &buf->dma_attrs);

buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
&buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
if (!buf->kvaddr) {
DRM_ERROR("failed to allocate buffer.\n");
return -ENOMEM;
}

buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!buf->sgt) {
DRM_ERROR("failed to allocate sg table.\n");
return -ENOMEM;
ret = -ENOMEM;
goto err_free_attrs;
}

ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
ret = dma_get_sgtable(dev->dev, buf->sgt, buf->kvaddr, buf->dma_addr,
buf->size);
if (ret < 0) {
DRM_ERROR("failed to initialize sg table.\n");
kfree(buf->sgt);
buf->sgt = NULL;
return -ENOMEM;
DRM_ERROR("failed to get sgtable.\n");
goto err_free_sgt;
}

buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
&buf->dma_addr, GFP_KERNEL);
if (!buf->kvaddr) {
DRM_ERROR("failed to allocate buffer.\n");
ret = -ENOMEM;
goto err1;
}
npages = buf->sgt->nents;

buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
if (!buf->pages) {
DRM_ERROR("failed to allocate pages.\n");
ret = -ENOMEM;
goto err2;
goto err_free_table;
}

sgl = buf->sgt->sgl;
start_addr = buf->dma_addr;

while (i < npages) {
buf->pages[i] = phys_to_page(start_addr);
sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
sg_dma_address(sgl) = start_addr;
start_addr += buf->page_size;
buf->pages[i] = sg_page(sgl);
sgl = sg_next(sgl);
i++;
}
@@ -108,14 +95,16 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
buf->size);

return ret;
err2:
dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
(dma_addr_t)buf->dma_addr);
buf->dma_addr = (dma_addr_t)NULL;
err1:

err_free_table:
sg_free_table(buf->sgt);
err_free_sgt:
kfree(buf->sgt);
buf->sgt = NULL;
err_free_attrs:
dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;

return ret;
}
@@ -125,16 +114,6 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
{
DRM_DEBUG_KMS("%s.\n", __FILE__);

/*
* release only physically continuous memory and
* non-continuous memory would be released by exynos
* gem framework.
*/
if (IS_NONCONTIG_BUFFER(flags)) {
DRM_DEBUG_KMS("not support allocation type.\n");
return;
}

if (!buf->dma_addr) {
DRM_DEBUG_KMS("dma_addr is invalid.\n");
return;
@@ -150,11 +129,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
kfree(buf->sgt);
buf->sgt = NULL;

kfree(buf->pages);
buf->pages = NULL;

dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
(dma_addr_t)buf->dma_addr);
dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;
}

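For reference, the rewritten allocation path above reduces to the generic
DMA attributes API: pick DMA_ATTR_FORCE_CONTIGUOUS or DMA_ATTR_WRITE_COMBINE,
allocate with dma_alloc_attrs(), and let dma_get_sgtable() describe the
result. A minimal standalone sketch of that flow (simplified names and
error handling, not the driver code itself), using the 3.7-era
struct dma_attrs interface:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_alloc_buffer(struct device *dev, size_t size, bool contig,
				void **vaddr, dma_addr_t *dma_addr,
				struct sg_table **out_sgt)
{
	struct dma_attrs attrs;
	struct sg_table *sgt;
	int ret;

	init_dma_attrs(&attrs);
	/* physically contiguous pages only when the caller asks for them;
	 * otherwise a write-combined, possibly scattered allocation */
	dma_set_attr(contig ? DMA_ATTR_FORCE_CONTIGUOUS
			    : DMA_ATTR_WRITE_COMBINE, &attrs);

	*vaddr = dma_alloc_attrs(dev, size, dma_addr, GFP_KERNEL, &attrs);
	if (!*vaddr)
		return -ENOMEM;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto err_free;
	}

	/* ask the DMA layer for an sg_table describing the allocation */
	ret = dma_get_sgtable(dev, sgt, *vaddr, *dma_addr, size);
	if (ret < 0) {
		kfree(sgt);
		goto err_free;
	}

	*out_sgt = sgt;
	return 0;

err_free:
	dma_free_attrs(dev, size, *vaddr, *dma_addr, &attrs);
	return ret;
}
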
87 changes: 37 additions & 50 deletions drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -30,29 +30,31 @@

#include <linux/dma-buf.h>

static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
unsigned int page_size)
static struct sg_table *exynos_get_sgt(struct drm_device *drm_dev,
struct exynos_drm_gem_buf *buf)
{
struct sg_table *sgt = NULL;
struct scatterlist *sgl;
int i, ret;
int ret;

sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
goto out;

ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
ret = sg_alloc_table(sgt, buf->sgt->nents, GFP_KERNEL);
if (ret)
goto err_free_sgt;

if (page_size < PAGE_SIZE)
page_size = PAGE_SIZE;

for_each_sg(sgt->sgl, sgl, nr_pages, i)
sg_set_page(sgl, pages[i], page_size, 0);
ret = dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr,
buf->dma_addr, buf->size);
if (ret < 0) {
DRM_ERROR("failed to get sgtable.\n");
goto err_free_table;
}

return sgt;

err_free_table:
sg_free_table(sgt);
err_free_sgt:
kfree(sgt);
sgt = NULL;
@@ -68,32 +70,31 @@ static struct sg_table *
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
struct sg_table *sgt = NULL;
unsigned int npages;
int nents;

DRM_DEBUG_PRIME("%s\n", __FILE__);

mutex_lock(&dev->struct_mutex);

buf = gem_obj->buffer;

/* there should always be pages allocated. */
if (!buf->pages) {
DRM_ERROR("pages is null.\n");
goto err_unlock;
if (!buf) {
DRM_ERROR("buffer is null.\n");
return sgt;
}

npages = buf->size / buf->page_size;
mutex_lock(&dev->struct_mutex);

sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
if (!sgt) {
DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
sgt = exynos_get_sgt(dev, buf);
if (!sgt)
goto err_unlock;
}

nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
if (!nents) {
DRM_ERROR("failed to map sgl with iommu.\n");
sgt = NULL;
goto err_unlock;
}

DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
npages, buf->size, buf->page_size);
DRM_DEBUG_PRIME("buffer size = 0x%lx page_size = 0x%lx\n",
buf->size, buf->page_size);

err_unlock:
mutex_unlock(&dev->struct_mutex);
@@ -105,6 +106,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

sg_free_table(sgt);
kfree(sgt);
sgt = NULL;
@@ -196,7 +198,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct scatterlist *sgl;
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buffer;
struct page *page;
int ret;

DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +234,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
goto err_unmap_attach;
}

buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
if (!buffer->pages) {
DRM_ERROR("failed to allocate pages.\n");
ret = -ENOMEM;
goto err_free_buffer;
}

exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
if (!exynos_gem_obj) {
ret = -ENOMEM;
goto err_free_pages;
goto err_free_buffer;
}

sgl = sgt->sgl;

if (sgt->nents == 1) {
buffer->dma_addr = sg_dma_address(sgt->sgl);
buffer->size = sg_dma_len(sgt->sgl);
buffer->size = dma_buf->size;
buffer->dma_addr = sg_dma_address(sgl);

if (sgt->nents == 1) {
/* always physically continuous memory if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
} else {
unsigned int i = 0;

buffer->dma_addr = sg_dma_address(sgl);
while (i < sgt->nents) {
buffer->pages[i] = sg_page(sgl);
buffer->size += sg_dma_len(sgl);
sgl = sg_next(sgl);
i++;
}

/*
* this case could be CONTIG or NONCONTIG type but for now
* sets NONCONTIG.
* TODO. we have to find a way that exporter can notify
* the type of its own buffer to importer.
*/
exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
}

@@ -277,9 +267,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,

return &exynos_gem_obj->base;

err_free_pages:
kfree(buffer->pages);
buffer->pages = NULL;
err_free_buffer:
kfree(buffer);
buffer = NULL;
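On the dma-buf export side, the reworked map_dma_buf path above
(exynos_get_sgt() followed by dma_map_sg()) uses the same pattern:
rebuild an sg_table for the buffer with dma_get_sgtable() and then map it
for the importing device. A condensed sketch of that flow (illustration
only, with locking dropped and the exynos_drm_gem_buf fields assumed as
in the diff above):

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "exynos_drm_gem.h"	/* struct exynos_drm_gem_buf */

static struct sg_table *example_map_dma_buf(struct dma_buf_attachment *attach,
					    struct drm_device *drm_dev,
					    struct exynos_drm_gem_buf *buf,
					    enum dma_data_direction dir)
{
	struct sg_table *sgt;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	/* describe the exporter's buffer; dma_get_sgtable() allocates
	 * and fills the table entries */
	if (dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr,
			    buf->dma_addr, buf->size) < 0)
		goto err_free_sgt;

	/* map the pages into the importing device's address space */
	if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
		sg_free_table(sgt);
		goto err_free_sgt;
	}

	return sgt;

err_free_sgt:
	kfree(sgt);
	return NULL;
}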
