From 68786b7f49873c69ec332a045a9bf4337d71ec20 Mon Sep 17 00:00:00 2001
From: Sui Jingfeng
Date: Sat, 26 Oct 2024 04:43:55 +0800
Subject: [PATCH] drm/etnaviv: Map and unmap GPUVA range with respect to the
 GPUVA size

Etnaviv assumes that the GPU page size is 4KiB. However, GPUVA ranges
collide when softpin capable GPUs are used on a configuration with a
non-4KiB CPU page size. The root cause is that a kernel-side BO takes
up a bigger address space than userspace expects: the backing memory
of a GEM buffer object has to be aligned to the CPU PAGE_SIZE, so a
userspace-allocated GPUVA range can fail to be inserted exactly into
the specified hole.

To solve this problem, first record the GPU-visible size of a BO, then
map and unmap the SG entries strictly with respect to the total GPUVA
size.

Signed-off-by: Sui Jingfeng
Signed-off-by: Lucas Stach
---
 drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 38 +++++++++------------------
 1 file changed, 13 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 02d9408d41bcf..7e065b3723cff 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -69,9 +69,11 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
 	return ret;
 }
 
-static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
+static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
+			     u32 iova, unsigned int va_len,
 			     struct sg_table *sgt, int prot)
-{	struct scatterlist *sg;
+{
+	struct scatterlist *sg;
 	unsigned int da = iova;
 	unsigned int i;
 	int ret;
@@ -81,14 +83,16 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
 
 	for_each_sgtable_dma_sg(sgt, sg, i) {
 		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
-		size_t bytes = sg_dma_len(sg) + sg->offset;
+		unsigned int da_len = sg_dma_len(sg) + sg->offset;
+		unsigned int bytes = min_t(unsigned int, da_len, va_len);
 
-		VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
+		VERB("map[%d]: %08x %pap(%x)", i, iova, &pa, bytes);
 
 		ret = etnaviv_context_map(context, da, pa, bytes, prot);
 		if (ret)
 			goto fail;
 
+		va_len -= bytes;
 		da += bytes;
 	}
 
@@ -104,21 +108,7 @@
 static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
 				struct sg_table *sgt, unsigned len)
 {
-	struct scatterlist *sg;
-	unsigned int da = iova;
-	int i;
-
-	for_each_sgtable_dma_sg(sgt, sg, i) {
-		size_t bytes = sg_dma_len(sg) + sg->offset;
-
-		etnaviv_context_unmap(context, da, bytes);
-
-		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
-
-		BUG_ON(!PAGE_ALIGNED(bytes));
-
-		da += bytes;
-	}
+	etnaviv_context_unmap(context, iova, len);
 
 	context->flush_seq++;
 }
@@ -131,7 +121,7 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
 	lockdep_assert_held(&context->lock);
 
 	etnaviv_iommu_unmap(context, mapping->vram_node.start,
-			    etnaviv_obj->sgt, etnaviv_obj->base.size);
+			    etnaviv_obj->sgt, etnaviv_obj->size);
 
 	drm_mm_remove_node(&mapping->vram_node);
 }
@@ -305,16 +295,14 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
 	node = &mapping->vram_node;
 
 	if (va)
-		ret = etnaviv_iommu_insert_exact(context, node,
-						 etnaviv_obj->base.size, va);
+		ret = etnaviv_iommu_insert_exact(context, node, etnaviv_obj->size, va);
 	else
-		ret = etnaviv_iommu_find_iova(context, node,
-					      etnaviv_obj->base.size);
+		ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size);
 	if (ret < 0)
 		goto unlock;
 
 	mapping->iova = node->start;
-	ret = etnaviv_iommu_map(context, node->start, sgt,
+	ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt,
 			       ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
 
 	if (ret < 0) {
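
To illustrate the collision described above, here is a minimal standalone
sketch (not etnaviv code; the 16KiB page size, the addresses and the
align_up() helper are illustrative assumptions) showing how a backing size
aligned to the CPU page size spills past the next softpin address on a
non-4KiB page kernel:

	/*
	 * Illustrative sketch only, not etnaviv code: assumes a 16KiB CPU
	 * page size and a userspace that packs softpin GPUVAs at 4KiB
	 * granularity.
	 */
	#include <stdio.h>

	#define GPU_PAGE_SIZE	0x1000UL  /* 4KiB, granularity userspace sizes BOs by */
	#define CPU_PAGE_SIZE	0x4000UL  /* 16KiB, what GEM backing memory aligns to */

	static unsigned long align_up(unsigned long v, unsigned long a)
	{
		return (v + a - 1) & ~(a - 1);
	}

	int main(void)
	{
		unsigned long va0 = 0x100000;		/* first softpinned BO */
		unsigned long user_size = 0x1000;	/* GPU-visible size: one 4KiB page */
		unsigned long va1 = va0 + user_size;	/* userspace places the next BO here */

		/*
		 * Before this patch, the kernel mapped base.size, i.e. the
		 * backing size aligned to CPU_PAGE_SIZE, so the first mapping
		 * extended past va1 and inserting the second BO at its exact
		 * address failed.
		 */
		unsigned long mapped = align_up(user_size, CPU_PAGE_SIZE);

		printf("userspace expects [0x%lx, 0x%lx)\n", va0, va0 + user_size);
		printf("kernel mapped     [0x%lx, 0x%lx), overlapping va1 = 0x%lx\n",
		       va0, va0 + mapped, va1);
		return 0;
	}

With the patch applied, etnaviv_iommu_map() clamps each SG entry to the
remaining va_len and etnaviv_iommu_unmap() unmaps exactly the recorded
GPUVA size, so only [va0, va0 + user_size) is consumed and the softpin
layout chosen by userspace stays intact.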