drm/etnaviv: Map and unmap GPUVA range with respect to the GPUVA size
Etnaviv assumes that the GPU page size is 4KiB. However, when softpin
capable GPUs are used on a configuration whose CPU page size is not
4KiB, GPUVA ranges collide: the backing memory of a GEM buffer object
has to be aligned to the CPU PAGE_SIZE, so a kernel-side BO occupies a
larger GPU address range than userspace expects. As a result, a
userspace-allocated GPUVA range may fail to be inserted exactly into
the specified hole.
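
The collision is easy to see with concrete numbers. The small userspace
program below is purely illustrative; the 16KiB CPU page size, the BO
size and the softpin addresses are assumptions chosen for the example:

  /* Illustration only, not part of the patch: why CPU-page-sized backing
   * breaks softpin placement when the CPU page size is not 4KiB.
   */
  #include <stdio.h>

  #define CPU_PAGE_SIZE 0x4000u  /* assumed 16KiB kernel PAGE_SIZE */
  #define GPU_PAGE_SIZE 0x1000u  /* etnaviv GPU page size: 4KiB    */

  int main(void)
  {
  	unsigned int user_size = GPU_PAGE_SIZE;  /* BO size as userspace sees it */
  	/* The kernel rounds the backing memory up to the CPU page size. */
  	unsigned int backing = (user_size + CPU_PAGE_SIZE - 1) & ~(CPU_PAGE_SIZE - 1);

  	unsigned int va_a = 0x10000000;       /* softpin address of BO A          */
  	unsigned int va_b = va_a + user_size; /* userspace packs BO B right after */

  	/* Before this change, BO A's GPUVA node covered the whole backing
  	 * size, so the exact insert of BO B's range at va_b failed.
  	 */
  	if (va_a + backing > va_b)
  		printf("collision: BO A occupies GPUVA up to 0x%08x, BO B pinned at 0x%08x\n",
  		       va_a + backing, va_b);

  	return 0;
  }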

To solve this problem, first record the GPU-visible size of a BO, then
map and unmap the SG entries strictly with respect to the total GPUVA
size.
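
Condensed from the diff below, the mapping loop clamps every scatterlist
entry to the GPUVA bytes that are still left, so the CPU-page-sized tail
of the backing memory never gets mapped:

  	for_each_sgtable_dma_sg(sgt, sg, i) {
  		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
  		unsigned int da_len = sg_dma_len(sg) + sg->offset;
  		/* never map more than what is left of the GPU-visible size */
  		unsigned int bytes = min_t(unsigned int, da_len, va_len);

  		ret = etnaviv_context_map(context, da, pa, bytes, prot);
  		if (ret)
  			goto fail;

  		va_len -= bytes;
  		da += bytes;
  	}

On the unmap side the per-entry loop is no longer needed at all: the
whole GPUVA range [iova, iova + len) is torn down with a single
etnaviv_context_unmap() call.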

Signed-off-by: Sui Jingfeng <sui.jingfeng@linux.dev>
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Sui Jingfeng authored and Lucas Stach committed Oct 28, 2024
Commit 68786b7 (1 parent: b5f1eed)
Showing 1 changed file with 13 additions and 25 deletions.
drivers/gpu/drm/etnaviv/etnaviv_mmu.c: 38 changes (13 additions, 25 deletions)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -69,9 +69,11 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
 	return ret;
 }
 
-static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
+static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
+			     u32 iova, unsigned int va_len,
 			     struct sg_table *sgt, int prot)
-{	struct scatterlist *sg;
+{
+	struct scatterlist *sg;
 	unsigned int da = iova;
 	unsigned int i;
 	int ret;
@@ -81,14 +83,16 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
 
 	for_each_sgtable_dma_sg(sgt, sg, i) {
 		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
-		size_t bytes = sg_dma_len(sg) + sg->offset;
+		unsigned int da_len = sg_dma_len(sg) + sg->offset;
+		unsigned int bytes = min_t(unsigned int, da_len, va_len);
 
-		VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
+		VERB("map[%d]: %08x %pap(%x)", i, iova, &pa, bytes);
 
 		ret = etnaviv_context_map(context, da, pa, bytes, prot);
 		if (ret)
 			goto fail;
 
+		va_len -= bytes;
 		da += bytes;
 	}
 
@@ -104,21 +108,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
 static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
 				struct sg_table *sgt, unsigned len)
 {
-	struct scatterlist *sg;
-	unsigned int da = iova;
-	int i;
-
-	for_each_sgtable_dma_sg(sgt, sg, i) {
-		size_t bytes = sg_dma_len(sg) + sg->offset;
-
-		etnaviv_context_unmap(context, da, bytes);
-
-		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
-
-		BUG_ON(!PAGE_ALIGNED(bytes));
-
-		da += bytes;
-	}
+	etnaviv_context_unmap(context, iova, len);
 
 	context->flush_seq++;
 }
@@ -131,7 +121,7 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
 	lockdep_assert_held(&context->lock);
 
 	etnaviv_iommu_unmap(context, mapping->vram_node.start,
-			    etnaviv_obj->sgt, etnaviv_obj->base.size);
+			    etnaviv_obj->sgt, etnaviv_obj->size);
 	drm_mm_remove_node(&mapping->vram_node);
 }
 
@@ -305,16 +295,14 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
 	node = &mapping->vram_node;
 
 	if (va)
-		ret = etnaviv_iommu_insert_exact(context, node,
-						 etnaviv_obj->base.size, va);
+		ret = etnaviv_iommu_insert_exact(context, node, etnaviv_obj->size, va);
 	else
-		ret = etnaviv_iommu_find_iova(context, node,
-					      etnaviv_obj->base.size);
+		ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size);
 	if (ret < 0)
 		goto unlock;
 
 	mapping->iova = node->start;
-	ret = etnaviv_iommu_map(context, node->start, sgt,
+	ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt,
 				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
 
 	if (ret < 0) {
