Merge branch 'for-5.10-drm-sg-fix' of https://github.com/mszyprow/linux into drm-next

Please pull a set of fixes for various DRM drivers that finally resolve
incorrect usage of scatterlists (the struct sg_table nents and orig_nents
entries), which causes issues when an IOMMU is used.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200910080505.24456-1-m.szyprowski@samsung.com
Dave Airlie committed Sep 17, 2020
2 parents 818280d + be0704b commit b40be05
Showing 36 changed files with 234 additions and 341 deletions.
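
Before the per-file hunks, a short orientation on the API change they all apply. The old dma_map_sg()/dma_unmap_sg() calls make the caller track two counts by hand: the number of physical entries in the table (sg_table.orig_nents) and the number of DMA segments actually mapped (the return value of dma_map_sg(), which an IOMMU may make smaller by merging entries). The dma_map_sgtable() helpers keep both counts inside the sg_table, which is what the drivers below are converted to. A minimal sketch of the two patterns, with made-up function names rather than code from any of these drivers:

#include <linux/dma-mapping.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>

/* Old pattern: dma_map_sg() returns the number of DMA segments (0 on
 * failure). Only that many entries carry a valid sg_dma_address(), so a
 * loop bounded by table->nents or table->orig_nents instead of this
 * return value walks past the mapped entries once an IOMMU merges them,
 * which is the bug class this pull request fixes. */
static int sketch_map_old(struct device *dev, struct sg_table *table,
			  enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int nr_dma, i;

	nr_dma = dma_map_sg(dev, table->sgl, table->orig_nents, dir);
	if (!nr_dma)
		return -ENOMEM;

	for_each_sg(table->sgl, sg, nr_dma, i)
		pr_debug("DMA chunk at %pad\n", &sg_dma_address(sg));
	return 0;
}

/* New pattern: dma_map_sgtable() maps table->orig_nents entries, stores
 * the mapped count in table->nents and returns 0 or a negative errno;
 * for_each_sgtable_dma_sg() then iterates exactly the mapped entries. */
static int sketch_map_new(struct device *dev, struct sg_table *table,
			  enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int ret, i;

	ret = dma_map_sgtable(dev, table, dir, 0);
	if (ret)
		return ret;

	for_each_sgtable_dma_sg(table, sg, i)
		pr_debug("DMA chunk at %pad\n", &sg_dma_address(sg));
	return 0;
}

The iterator conversions below follow the same split: for_each_sgtable_sg() and for_each_sgtable_page() walk the orig_nents physical entries (CPU pages), while for_each_sgtable_dma_sg() and for_each_sgtable_dma_page() walk the nents mapped entries (DMA addresses).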
13 changes: 6 additions & 7 deletions drivers/dma-buf/heaps/heap-helpers.c
@@ -140,21 +140,20 @@ struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct dma_heaps_attachment *a = attachment->priv;
struct sg_table *table;

table = &a->table;
struct sg_table *table = &a->table;
int ret;

if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
direction))
table = ERR_PTR(-ENOMEM);
ret = dma_map_sgtable(attachment->dev, table, direction, 0);
if (ret)
table = ERR_PTR(ret);
return table;
}

static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *table,
enum dma_data_direction direction)
{
dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
7 changes: 3 additions & 4 deletions drivers/dma-buf/udmabuf.c
@@ -63,10 +63,9 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
GFP_KERNEL);
if (ret < 0)
goto err;
if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) {
ret = -EINVAL;
ret = dma_map_sgtable(dev, sg, direction, 0);
if (ret < 0)
goto err;
}
return sg;

err:
@@ -78,7 +77,7 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
static void put_sg_table(struct device *dev, struct sg_table *sg,
enum dma_data_direction direction)
{
dma_unmap_sg(dev, sg->sgl, sg->nents, direction);
dma_unmap_sgtable(dev, sg, direction, 0);
sg_free_table(sg);
kfree(sg);
}
24 changes: 11 additions & 13 deletions drivers/gpu/drm/armada/armada_gem.c
@@ -379,7 +379,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
struct armada_gem_object *dobj = drm_to_armada_gem(obj);
struct scatterlist *sg;
struct sg_table *sgt;
int i, num;
int i;

sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
@@ -395,30 +395,26 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,

mapping = dobj->obj.filp->f_mapping;

for_each_sg(sgt->sgl, sg, count, i) {
for_each_sgtable_sg(sgt, sg, i) {
struct page *page;

page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
num = i;
if (IS_ERR(page))
goto release;
}

sg_set_page(sg, page, PAGE_SIZE, 0);
}

if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
num = sgt->nents;
if (dma_map_sgtable(attach->dev, sgt, dir, 0))
goto release;
}
} else if (dobj->page) {
/* Single contiguous page */
if (sg_alloc_table(sgt, 1, GFP_KERNEL))
goto free_sgt;

sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
if (dma_map_sgtable(attach->dev, sgt, dir, 0))
goto free_table;
} else if (dobj->linear) {
/* Single contiguous physical region - no struct page */
@@ -432,8 +428,9 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
return sgt;

release:
for_each_sg(sgt->sgl, sg, num, i)
put_page(sg_page(sg));
for_each_sgtable_sg(sgt, sg, i)
if (sg_page(sg))
put_page(sg_page(sg));
free_table:
sg_free_table(sgt);
free_sgt:
@@ -449,11 +446,12 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
int i;

if (!dobj->linear)
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
dma_unmap_sgtable(attach->dev, sgt, dir, 0);

if (dobj->obj.filp) {
struct scatterlist *sg;
for_each_sg(sgt->sgl, sg, sgt->nents, i)

for_each_sgtable_sg(sgt, sg, i)
put_page(sg_page(sg));
}

2 changes: 1 addition & 1 deletion drivers/gpu/drm/drm_cache.c
@@ -127,7 +127,7 @@ drm_clflush_sg(struct sg_table *st)
struct sg_page_iter sg_iter;

mb(); /*CLFLUSH is ordered only by using memory barriers*/
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
for_each_sgtable_page(st, &sg_iter, 0)
drm_clflush_page(sg_page_iter_page(&sg_iter));
mb(); /*Make sure that all cache line entry is flushed*/

23 changes: 3 additions & 20 deletions drivers/gpu/drm/drm_gem_cma_helper.c
@@ -471,26 +471,9 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
{
struct drm_gem_cma_object *cma_obj;

if (sgt->nents != 1) {
/* check if the entries in the sg_table are contiguous */
dma_addr_t next_addr = sg_dma_address(sgt->sgl);
struct scatterlist *s;
unsigned int i;

for_each_sg(sgt->sgl, s, sgt->nents, i) {
/*
* sg_dma_address(s) is only valid for entries
* that have sg_dma_len(s) != 0
*/
if (!sg_dma_len(s))
continue;

if (sg_dma_address(s) != next_addr)
return ERR_PTR(-EINVAL);

next_addr = sg_dma_address(s) + sg_dma_len(s);
}
}
/* check if the entries in the sg_table are contiguous */
if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
return ERR_PTR(-EINVAL);

/* Create a CMA GEM buffer. */
cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
14 changes: 9 additions & 5 deletions drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -126,8 +126,8 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
drm_prime_gem_destroy(obj, shmem->sgt);
} else {
if (shmem->sgt) {
dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
shmem->sgt->nents, DMA_BIDIRECTIONAL);
dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
DMA_BIDIRECTIONAL, 0);
sg_free_table(shmem->sgt);
kfree(shmem->sgt);
}
@@ -424,8 +424,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)

WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
shmem->sgt->nents, DMA_BIDIRECTIONAL);
dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
sg_free_table(shmem->sgt);
kfree(shmem->sgt);
shmem->sgt = NULL;
@@ -697,12 +696,17 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
goto err_put_pages;
}
/* Map the pages for use by the h/w. */
dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
if (ret)
goto err_free_sgt;

shmem->sgt = sgt;

return sgt;

err_free_sgt:
sg_free_table(sgt);
kfree(sgt);
err_put_pages:
drm_gem_shmem_put_pages(shmem);
return ERR_PTR(ret);
91 changes: 52 additions & 39 deletions drivers/gpu/drm/drm_prime.c
@@ -617,6 +617,7 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct sg_table *sgt;
int ret;

if (WARN_ON(dir == DMA_NONE))
return ERR_PTR(-EINVAL);
@@ -626,11 +627,12 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
else
sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
DMA_ATTR_SKIP_CPU_SYNC)) {
ret = dma_map_sgtable(attach->dev, sgt, dir,
DMA_ATTR_SKIP_CPU_SYNC);
if (ret) {
sg_free_table(sgt);
kfree(sgt);
sgt = ERR_PTR(-ENOMEM);
sgt = ERR_PTR(ret);
}

return sgt;
@@ -652,8 +654,7 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
if (!sgt)
return;

dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
DMA_ATTR_SKIP_CPU_SYNC);
dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sgt);
kfree(sgt);
}
@@ -825,6 +826,37 @@ struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_page
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

/**
* drm_prime_get_contiguous_size - returns the contiguous size of the buffer
* @sgt: sg_table describing the buffer to check
*
* This helper calculates the contiguous size in the DMA address space
* of the buffer described by the provided sg_table.
*
* This is useful for implementing
* &drm_gem_object_funcs.gem_prime_import_sg_table.
*/
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
{
dma_addr_t expected = sg_dma_address(sgt->sgl);
struct scatterlist *sg;
unsigned long size = 0;
int i;

for_each_sgtable_dma_sg(sgt, sg, i) {
unsigned int len = sg_dma_len(sg);

if (!len)
break;
if (sg_dma_address(sg) != expected)
break;
expected += len;
size += len;
}
return size;
}
EXPORT_SYMBOL(drm_prime_get_contiguous_size);

/**
* drm_gem_prime_export - helper library implementation of the export callback
* @obj: GEM object to export
@@ -959,45 +991,26 @@ EXPORT_SYMBOL(drm_gem_prime_import);
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
dma_addr_t *addrs, int max_entries)
{
unsigned count;
struct scatterlist *sg;
struct page *page;
u32 page_len, page_index;
dma_addr_t addr;
u32 dma_len, dma_index;

/*
* Scatterlist elements contain both pages and DMA addresses, but
* one should not assume 1:1 relation between them. The sg->length is
* the size of the physical memory chunk described by the sg->page,
* while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk
* described by the sg_dma_address(sg).
*/
page_index = 0;
dma_index = 0;
for_each_sg(sgt->sgl, sg, sgt->nents, count) {
page_len = sg->length;
page = sg_page(sg);
dma_len = sg_dma_len(sg);
addr = sg_dma_address(sg);

while (pages && page_len > 0) {
if (WARN_ON(page_index >= max_entries))
struct sg_dma_page_iter dma_iter;
struct sg_page_iter page_iter;
struct page **p = pages;
dma_addr_t *a = addrs;

if (pages) {
for_each_sgtable_page(sgt, &page_iter, 0) {
if (WARN_ON(p - pages >= max_entries))
return -1;
pages[page_index] = page;
page++;
page_len -= PAGE_SIZE;
page_index++;
*p++ = sg_page_iter_page(&page_iter);
}
while (addrs && dma_len > 0) {
if (WARN_ON(dma_index >= max_entries))
}
if (addrs) {
for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
if (WARN_ON(a - addrs >= max_entries))
return -1;
addrs[dma_index] = addr;
addr += PAGE_SIZE;
dma_len -= PAGE_SIZE;
dma_index++;
*a++ = sg_page_iter_dma_address(&dma_iter);
}
}

return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
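The drm_prime.c hunk above also introduces drm_prime_get_contiguous_size(), which the drm_gem_cma_helper.c change uses in place of the removed open-coded loop. A hedged sketch of the intended call site, with a hypothetical callback name that is not part of this commit:

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

/* Hypothetical gem_prime_import_sg_table() callback (see the kernel-doc
 * above): accept an imported buffer only if its DMA mapping is one
 * contiguous range covering the whole dma-buf. */
static struct drm_gem_object *
sketch_prime_import_sg_table(struct drm_device *dev,
			     struct dma_buf_attachment *attach,
			     struct sg_table *sgt)
{
	/* drm_prime_get_contiguous_size() walks the DMA-mapped entries and
	 * returns how many bytes are contiguous from the first entry. */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return ERR_PTR(-EINVAL);

	/* A real driver would create its GEM object here and record
	 * sg_dma_address(sgt->sgl) as the buffer's DMA address. */
	return ERR_PTR(-ENOSYS);
}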
12 changes: 5 additions & 7 deletions drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -27,7 +27,7 @@ static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
* because display controller, GPU, etc. are not coherent.
*/
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
@@ -51,7 +51,7 @@ static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj
* discard those writes.
*/
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
Expand Down Expand Up @@ -404,9 +404,8 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}

if (etnaviv_obj->flags & ETNA_BO_CACHED) {
dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
etnaviv_obj->sgt->nents,
etnaviv_op_to_dma_dir(op));
dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
etnaviv_op_to_dma_dir(op));
etnaviv_obj->last_cpu_prep_op = op;
}

@@ -421,8 +420,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
/* fini without a prep is almost certainly a userspace error */
WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
etnaviv_obj->sgt->nents,
dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
etnaviv_obj->last_cpu_prep_op = 0;
}