From 031abf0b70cb6804eefb11340463a2277e52f853 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 16 Apr 2019 20:23:39 +0200
Subject: [PATCH 1/9] sparc/iommu: use !PageHighMem to check if a page has a kernel mapping

This deobfuscates the check a bit, and prepares for future changes.

Signed-off-by: Christoph Hellwig
Reported-by: Guenter Roeck
Signed-off-by: David S. Miller
---
 arch/sparc/mm/iommu.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index e8d5d73ca40d6..dcdadac03fdfe 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -273,7 +273,8 @@ static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
 		 * XXX Is this a good assumption?
 		 * XXX What if someone else unmaps it here and races us?
 		 */
-		if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
+		if (!PageHighMem(sg_page(sg))) {
+			page = (unsigned long)page_address(sg_page(sg));
 			for (i = 0; i < n; i++) {
 				if (page != oldpage) {	/* Already flushed? */
 					flush_page_for_dma(page);

From a7fce1f7ca2f092fe44a17cb158deda97060aab4 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 16 Apr 2019 20:23:40 +0200
Subject: [PATCH 2/9] sparc/iommu: use sbus_iommu_unmap_page in sbus_iommu_unmap_sg

Use the page-level helper instead of duplicating the logic, while also
fixing the incorrect handling of larger than page sized offsets in the
sg variant.

Signed-off-by: Christoph Hellwig
Reported-by: Guenter Roeck
Signed-off-by: David S. Miller
---
 arch/sparc/mm/iommu.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index dcdadac03fdfe..f47a6ce0acaa0 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -321,11 +321,11 @@ static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct scatterlist *sg;
-	int i, n;
+	int i;
 
 	for_each_sg(sgl, sg, nents, i) {
-		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
+		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
+				attrs);
 		sg->dma_address = 0x21212121;
 	}
 }
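The "larger than page sized offsets" problem that patch 2 fixes is easiest to see with concrete numbers. The following stand-alone C program is a model of the arithmetic only, not kernel code: the PAGE_* macros are redefined locally for a 4 KiB page and the bus address, offset and length are hypothetical. It contrasts the page count the old sbus_iommu_unmap_sg() derived from sg->offset with the count sbus_iommu_unmap_page() derives from the offset within the mapped page.

/* Stand-alone model (not kernel code) of the unmap page-count arithmetic
 * touched by patch 2.  PAGE_* are defined locally for a 4K page; the
 * offset/length/base values are hypothetical.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long busa0 = 0xfc000000;	/* hypothetical bus address of the mapping */
	unsigned long offset = 5000;		/* sg->offset larger than PAGE_SIZE */
	unsigned long length = 300;		/* sg->length */
	unsigned long dma_addr = busa0 + offset;

	/* old sbus_iommu_unmap_sg(): count derived from sg->offset */
	unsigned long old_n = (length + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* sbus_iommu_unmap_page(): count derived from the offset within the page */
	unsigned long off = dma_addr & ~PAGE_MASK;
	unsigned long new_n = (off + length + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printf("old: release %lu page(s) starting at %#lx\n", old_n, dma_addr & PAGE_MASK);
	printf("new: release %lu page(s) starting at %#lx\n", new_n, dma_addr & PAGE_MASK);
	return 0;
}

With these values the old computation wants to release two IOMMU pages while the 300-byte buffer only occupies one, so the two code paths disagree about how much of the IOMMU window to free; that inconsistency is what the commit message refers to.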
From f25b23bc156fef3211fe4adf9692eca5ce2fd082 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 16 Apr 2019 20:23:41 +0200
Subject: [PATCH 3/9] sparc/iommu: merge iommu_release_one and sbus_iommu_unmap_page

There is only one caller of iommu_release_one left, so merge it into
that one to clean things up a bit.

Signed-off-by: Christoph Hellwig
Reported-by: Guenter Roeck
Signed-off-by: David S. Miller
---
 arch/sparc/mm/iommu.c | 21 +++++++--------------
 1 file changed, 7 insertions(+), 14 deletions(-)

diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index f47a6ce0acaa0..7cb9ddda75318 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -291,14 +291,17 @@ static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
 	return nents;
 }
 
-static void iommu_release_one(struct device *dev, u32 busa, int npages)
+static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
+		size_t len, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
-	int ioptex;
-	int i;
+	unsigned int busa = dma_addr & PAGE_MASK;
+	unsigned long off = dma_addr & ~PAGE_MASK;
+	unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
+	unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
+	unsigned int i;
 
 	BUG_ON(busa < iommu->start);
-	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
 	for (i = 0; i < npages; i++) {
 		iopte_val(iommu->page_table[ioptex + i]) = 0;
 		iommu_invalidate_page(iommu->regs, busa);
@@ -307,16 +310,6 @@ static void iommu_release_one(struct device *dev, u32 busa, int npages)
 	bit_map_clear(&iommu->usemap, ioptex, npages);
 }
 
-static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
-		size_t len, enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long off = dma_addr & ~PAGE_MASK;
-	int npages;
-
-	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
-	iommu_release_one(dev, dma_addr & PAGE_MASK, npages);
-}
-
 static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
 {

From ff5cbec0c3ea8b96c4cb7bcd9f484d8665d394e6 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 16 Apr 2019 20:23:42 +0200
Subject: [PATCH 4/9] sparc/iommu: create a common helper for map_sg

Share the code for the global and per-page flush map_sg loops using a
simple bool parameter to disable the per-page flush for the former
variant.

Signed-off-by: Christoph Hellwig
Reported-by: Guenter Roeck
Signed-off-by: David S. Miller
---
 arch/sparc/mm/iommu.c | 37 +++++++++++++++++--------------------
 1 file changed, 17 insertions(+), 20 deletions(-)

diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 7cb9ddda75318..f90d943a3a272 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -241,25 +241,9 @@ static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
 	return __sbus_iommu_map_page(dev, page, offset, len);
 }
 
-static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-	struct scatterlist *sg;
-	int i, n;
-
-	flush_page_for_dma(0);
-
-	for_each_sg(sgl, sg, nents, i) {
-		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
-		sg->dma_length = sg->length;
-	}
-
-	return nents;
-}
-
-static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
+static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs,
+		bool per_page_flush)
 {
 	unsigned long page, oldpage = 0;
 	struct scatterlist *sg;
@@ -273,7 +257,7 @@ static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
 		 * XXX Is this a good assumption?
 		 * XXX What if someone else unmaps it here and races us?
 		 */
-		if (!PageHighMem(sg_page(sg))) {
+		if (per_page_flush && !PageHighMem(sg_page(sg))) {
 			page = (unsigned long)page_address(sg_page(sg));
 			for (i = 0; i < n; i++) {
 				if (page != oldpage) {	/* Already flushed? */
@@ -291,6 +275,19 @@ static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
 	return nents;
 }
 
+static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	flush_page_for_dma(0);
+	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, false);
+}
+
+static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
+}
+
 static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
 		size_t len, enum dma_data_direction dir, unsigned long attrs)
 {
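Patch 4 is a pure code-motion refactor: two map_sg entry points collapse into one helper that takes a bool selecting the per-page flush. The same shape, reduced to a stand-alone sketch with illustrative names (none of these functions are the kernel's; flush_page() just prints what a real implementation would flush):

/* Stand-alone sketch (not kernel code) of the refactoring pattern in patch 4:
 * two entry points become thin wrappers around one shared helper, with a bool
 * selecting the per-page flush behaviour.
 */
#include <stdbool.h>
#include <stdio.h>

static void flush_page(unsigned long addr)
{
	printf("flush %#lx\n", addr);
}

static int map_range(unsigned long start, unsigned long npages, bool per_page_flush)
{
	for (unsigned long i = 0; i < npages; i++) {
		if (per_page_flush)
			flush_page(start + i * 4096);
		/* ... set up one IOMMU entry per page ... */
	}
	return 0;
}

/* "gflush" variant: one global flush up front, no per-page work */
static int map_range_gflush(unsigned long start, unsigned long npages)
{
	flush_page(0);		/* stands in for flush_page_for_dma(0) */
	return map_range(start, npages, false);
}

/* "pflush" variant: flush each page as it is mapped */
static int map_range_pflush(unsigned long start, unsigned long npages)
{
	return map_range(start, npages, true);
}

int main(void)
{
	map_range_gflush(0x10000, 2);
	map_range_pflush(0x20000, 2);
	return 0;
}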
From b82059428c0577c2ec082974d7956291d5eae2cf Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 16 Apr 2019 20:23:43 +0200
Subject: [PATCH 5/9] sparc/iommu: pass a physical address to iommu_get_one

No need for the page structure, just the paddr / pfn. This is going to
simplify fixes to the callers.

Signed-off-by: Christoph Hellwig
Reported-by: Guenter Roeck
Signed-off-by: David S. Miller
---
 arch/sparc/mm/iommu.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index f90d943a3a272..19d9266e4049d 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -175,16 +175,17 @@ static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
 	}
 }
 
-static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
+static u32 iommu_get_one(struct device *dev, phys_addr_t paddr, int npages)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
 	int ioptex;
 	iopte_t *iopte, *iopte0;
 	unsigned int busa, busa0;
+	unsigned long pfn = __phys_to_pfn(paddr);
 	int i;
 
 	/* page color = pfn of page */
-	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
+	ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
 	if (ioptex < 0)
 		panic("iommu out");
 	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
@@ -193,11 +194,11 @@ static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
 	busa = busa0;
 	iopte = iopte0;
 	for (i = 0; i < npages; i++) {
-		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
+		iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
 		iommu_invalidate_page(iommu->regs, busa);
 		busa += PAGE_SIZE;
 		iopte++;
-		page++;
+		pfn++;
 	}
 
 	iommu_flush_iotlb(iopte0, npages);
@@ -215,7 +216,7 @@ static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
 	/* XXX So what is maxphys for us and how do drivers know it? */
 	if (!len || len > 256 * 1024)
 		return DMA_MAPPING_ERROR;
-	return iommu_get_one(dev, virt_to_page(vaddr), npages) + off;
+	return iommu_get_one(dev, virt_to_phys(vaddr), npages) + off;
 }
 
 static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
@@ -268,7 +269,7 @@ static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
 			}
 		}
 
-		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
+		sg->dma_address = iommu_get_one(dev, sg_phys(sg), n) + sg->offset;
 		sg->dma_length = sg->length;
 	}
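Patch 5 works because a physical address carries the same information as a struct page plus an in-page offset: the page frame number is just the physical address shifted right by PAGE_SHIFT, so advancing the mapping loop by one page is a plain pfn increment. A stand-alone model of that arithmetic (PAGE_SHIFT fixed at 12 here, values hypothetical, not kernel code):

/* Stand-alone model (not kernel code) of the paddr/pfn arithmetic behind
 * patch 5: __phys_to_pfn(paddr) is paddr >> PAGE_SHIFT.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	uint64_t paddr = 0x12345678;		/* hypothetical physical address */
	uint64_t pfn = paddr >> PAGE_SHIFT;	/* what __phys_to_pfn() computes */
	uint64_t off = paddr & (PAGE_SIZE - 1);

	printf("paddr %#llx -> pfn %#llx, offset in page %#llx\n",
	       (unsigned long long)paddr, (unsigned long long)pfn,
	       (unsigned long long)off);

	/* mapping three consecutive pages just increments the pfn */
	for (int i = 0; i < 3; i++)
		printf("IOPTE %d covers pfn %#llx\n", i, (unsigned long long)(pfn + i));
	return 0;
}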
From 8668b38c1c7720baf76da15a7a7eef43ae0c65a4 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 16 Apr 2019 20:23:44 +0200
Subject: [PATCH 6/9] sparc/iommu: move per-page flushing into __sbus_iommu_map_page

This prepares for reusing __sbus_iommu_map_page in the map_sg path.

Signed-off-by: Christoph Hellwig
Reported-by: Guenter Roeck
Signed-off-by: David S. Miller
---
 arch/sparc/mm/iommu.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)

diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 19d9266e4049d..7e191c8ae46a3 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -207,15 +207,25 @@ static u32 iommu_get_one(struct device *dev, phys_addr_t paddr, int npages)
 }
 
 static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t len)
+		unsigned long offset, size_t len, bool per_page_flush)
 {
 	void *vaddr = page_address(page) + offset;
 	unsigned long off = (unsigned long)vaddr & ~PAGE_MASK;
 	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
+ 
 	/* XXX So what is maxphys for us and how do drivers know it? */
 	if (!len || len > 256 * 1024)
 		return DMA_MAPPING_ERROR;
+
+	if (per_page_flush) {
+		unsigned long p = (unsigned long)vaddr & PAGE_MASK;
+
+		while (p < (unsigned long)vaddr + len) {
+			flush_page_for_dma(p);
+			p += PAGE_SIZE;
+		}
+	}
+
 	return iommu_get_one(dev, virt_to_phys(vaddr), npages) + off;
 }
 
@@ -224,22 +234,14 @@ static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
 		enum dma_data_direction dir, unsigned long attrs)
 {
 	flush_page_for_dma(0);
-	return __sbus_iommu_map_page(dev, page, offset, len);
+	return __sbus_iommu_map_page(dev, page, offset, len, false);
 }
 
 static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
 		struct page *page, unsigned long offset, size_t len,
 		enum dma_data_direction dir, unsigned long attrs)
 {
-	void *vaddr = page_address(page) + offset;
-	unsigned long p = ((unsigned long)vaddr) & PAGE_MASK;
-
-	while (p < (unsigned long)vaddr + len) {
-		flush_page_for_dma(p);
-		p += PAGE_SIZE;
-	}
-
-	return __sbus_iommu_map_page(dev, page, offset, len);
+	return __sbus_iommu_map_page(dev, page, offset, len, true);
 }
 
 static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
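The loop patch 6 moves into __sbus_iommu_map_page() flushes every page overlapped by the buffer, starting from the page-aligned base of vaddr. A stand-alone model (not kernel code; flush_page_for_dma() is stubbed to a printf and the address/length are hypothetical) shows which pages get flushed for a buffer that spans three pages:

/* Stand-alone model (not kernel code) of the flush loop moved in patch 6:
 * every page touched by [vaddr, vaddr + len) is flushed once.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static void flush_page_for_dma(unsigned long page)
{
	printf("flush page at %#lx\n", page);
}

int main(void)
{
	unsigned long vaddr = 0xc0001f00;	/* hypothetical kernel virtual address */
	unsigned long len = 0x1200;		/* range spans three pages */
	unsigned long p = vaddr & PAGE_MASK;

	while (p < vaddr + len) {
		flush_page_for_dma(p);
		p += PAGE_SIZE;
	}
	return 0;
}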
From 7e996890b88078011bfb55ce072712d464207dad Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 16 Apr 2019 20:23:45 +0200
Subject: [PATCH 7/9] sparc/iommu: fix __sbus_iommu_map_page for highmem pages

__sbus_iommu_map_page currently assumes all pages are mapped into the
kernel direct mapping.  Switch to using physical address instead of
virtual ones for all the normal mapping operations, and only use the
virtual addresses for cache flushing when not operating on a highmem
page.

Signed-off-by: Christoph Hellwig
Reported-by: Guenter Roeck
Signed-off-by: David S. Miller
---
 arch/sparc/mm/iommu.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 7e191c8ae46a3..37b5ce7657f61 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -209,24 +209,23 @@ static u32 iommu_get_one(struct device *dev, phys_addr_t paddr, int npages)
 static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t len, bool per_page_flush)
 {
-	void *vaddr = page_address(page) + offset;
-	unsigned long off = (unsigned long)vaddr & ~PAGE_MASK;
+	phys_addr_t paddr = page_to_phys(page) + offset;
+	unsigned long off = paddr & ~PAGE_MASK;
 	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	/* XXX So what is maxphys for us and how do drivers know it? */
 	if (!len || len > 256 * 1024)
 		return DMA_MAPPING_ERROR;
 
-	if (per_page_flush) {
-		unsigned long p = (unsigned long)vaddr & PAGE_MASK;
+	if (per_page_flush && !PageHighMem(page)) {
+		unsigned long vaddr, p;
 
-		while (p < (unsigned long)vaddr + len) {
+		vaddr = (unsigned long)page_address(page) + offset;
+		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
 			flush_page_for_dma(p);
-			p += PAGE_SIZE;
-		}
 	}
 
-	return iommu_get_one(dev, virt_to_phys(vaddr), npages) + off;
+	return iommu_get_one(dev, paddr, npages) + off;
 }
 
 static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,

From edb1f07203ba8856b24bcddf8326386ba6a03291 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 16 Apr 2019 20:23:46 +0200
Subject: [PATCH 8/9] sparc/iommu: use __sbus_iommu_map_page to implement the map_sg path

This means we handle > PAGE_SIZE offsets fine, and grow the size check
so far only performed in the map_page path.  We lose the optimization
to not double flush a page if it appears in multiple consecutive SG
list entries.  But at least for block I/O those don't happen anymore
since we properly merge in higher layers anyway.

Signed-off-by: Christoph Hellwig
Reported-by: Guenter Roeck
Signed-off-by: David S. Miller
---
 arch/sparc/mm/iommu.c | 31 ++++++++++---------------------
 1 file changed, 10 insertions(+), 21 deletions(-)

diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 37b5ce7657f61..8fbc08d148361 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -217,6 +217,11 @@ static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
 	if (!len || len > 256 * 1024)
 		return DMA_MAPPING_ERROR;
 
+	/*
+	 * We expect unmapped highmem pages to be not in the cache.
+	 * XXX Is this a good assumption?
+	 * XXX What if someone else unmaps it here and races us?
+	 */
 	if (per_page_flush && !PageHighMem(page)) {
 		unsigned long vaddr, p;
 
@@ -247,30 +252,14 @@ static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
 		int nents, enum dma_data_direction dir, unsigned long attrs,
 		bool per_page_flush)
 {
-	unsigned long page, oldpage = 0;
 	struct scatterlist *sg;
-	int i, j, n;
+	int j;
 
 	for_each_sg(sgl, sg, nents, j) {
-		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-
-		/*
-		 * We expect unmapped highmem pages to be not in the cache.
-		 * XXX Is this a good assumption?
-		 * XXX What if someone else unmaps it here and races us?
-		 */
-		if (per_page_flush && !PageHighMem(sg_page(sg))) {
-			page = (unsigned long)page_address(sg_page(sg));
-			for (i = 0; i < n; i++) {
-				if (page != oldpage) {	/* Already flushed? */
-					flush_page_for_dma(page);
-					oldpage = page;
-				}
-				page += PAGE_SIZE;
-			}
-		}
-
-		sg->dma_address = iommu_get_one(dev, sg_phys(sg), n) + sg->offset;
+		sg->dma_address =__sbus_iommu_map_page(dev, sg_page(sg),
+				sg->offset, sg->length, per_page_flush);
+		if (sg->dma_address == DMA_MAPPING_ERROR)
+			return 0;
 		sg->dma_length = sg->length;
 	}
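After patch 8 every scatterlist entry goes through the same single-page helper, and a failed entry makes the whole map_sg call return 0, which is how the DMA API reports scatterlist mapping failure to drivers. A stand-alone model of that control flow (illustrative types and names, not the kernel implementation; the 256 KiB limit mirrors the size check in __sbus_iommu_map_page):

/* Stand-alone model (not kernel code) of the map_sg control flow after
 * patch 8: each entry is mapped through the single-page helper, and the
 * whole call reports failure by returning 0 mapped entries.
 */
#include <stdio.h>

#define MAPPING_ERROR	(~0UL)		/* stands in for DMA_MAPPING_ERROR */

struct sg_entry {
	unsigned long offset;
	unsigned long length;
	unsigned long dma_address;
	unsigned long dma_length;
};

/* stands in for __sbus_iommu_map_page(); fails for oversized requests */
static unsigned long map_one(unsigned long offset, unsigned long length)
{
	if (!length || length > 256 * 1024)
		return MAPPING_ERROR;
	return 0xfc000000UL + offset;	/* hypothetical bus address */
}

static int map_sg(struct sg_entry *sg, int nents)
{
	for (int j = 0; j < nents; j++) {
		sg[j].dma_address = map_one(sg[j].offset, sg[j].length);
		if (sg[j].dma_address == MAPPING_ERROR)
			return 0;	/* caller sees "0 entries mapped" */
		sg[j].dma_length = sg[j].length;
	}
	return nents;
}

int main(void)
{
	struct sg_entry sg[2] = {
		{ .offset = 0x100, .length = 0x200 },
		{ .offset = 0,     .length = 512 * 1024 },	/* too large, will fail */
	};

	printf("mapped %d of 2 entries\n", map_sg(sg, 2));
	return 0;
}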
From 376b1371a9f29112ae000cc0cade174a9a670053 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 16 Apr 2019 20:23:47 +0200
Subject: [PATCH 9/9] sparc/iommu: merge iommu_get_one and __sbus_iommu_map_page

There is only one caller of iommu_get_one left, so merge it into that
one to clean things up a bit.

Signed-off-by: Christoph Hellwig
Signed-off-by: David S. Miller
---
 arch/sparc/mm/iommu.c | 56 ++++++++++++++++++++++----------------------
 1 file changed, 24 insertions(+), 32 deletions(-)

diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 8fbc08d148361..71ac353032b68 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -175,43 +175,17 @@ static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
 	}
 }
 
-static u32 iommu_get_one(struct device *dev, phys_addr_t paddr, int npages)
-{
-	struct iommu_struct *iommu = dev->archdata.iommu;
-	int ioptex;
-	iopte_t *iopte, *iopte0;
-	unsigned int busa, busa0;
-	unsigned long pfn = __phys_to_pfn(paddr);
-	int i;
-
-	/* page color = pfn of page */
-	ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
-	if (ioptex < 0)
-		panic("iommu out");
-	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
-	iopte0 = &iommu->page_table[ioptex];
-
-	busa = busa0;
-	iopte = iopte0;
-	for (i = 0; i < npages; i++) {
-		iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
-		iommu_invalidate_page(iommu->regs, busa);
-		busa += PAGE_SIZE;
-		iopte++;
-		pfn++;
-	}
-
-	iommu_flush_iotlb(iopte0, npages);
-
-	return busa0;
-}
-
 static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t len, bool per_page_flush)
 {
+	struct iommu_struct *iommu = dev->archdata.iommu;
 	phys_addr_t paddr = page_to_phys(page) + offset;
 	unsigned long off = paddr & ~PAGE_MASK;
 	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long pfn = __phys_to_pfn(paddr);
+	unsigned int busa, busa0;
+	iopte_t *iopte, *iopte0;
+	int ioptex, i;
 
 	/* XXX So what is maxphys for us and how do drivers know it? */
 	if (!len || len > 256 * 1024)
@@ -230,7 +204,25 @@ static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
 			flush_page_for_dma(p);
 	}
 
-	return iommu_get_one(dev, paddr, npages) + off;
+	/* page color = pfn of page */
+	ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
+	if (ioptex < 0)
+		panic("iommu out");
+	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
+	iopte0 = &iommu->page_table[ioptex];
+
+	busa = busa0;
+	iopte = iopte0;
+	for (i = 0; i < npages; i++) {
+		iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
+		iommu_invalidate_page(iommu->regs, busa);
+		busa += PAGE_SIZE;
+		iopte++;
+		pfn++;
+	}
+
+	iommu_flush_iotlb(iopte0, npages);
+	return busa0 + off;
 }
 
 static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
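For a sense of what the merged function in patch 9 computes, the stand-alone sketch below (not kernel code; the IOMMU window base, allocated slot and buffer address are hypothetical) walks the same arithmetic: round the buffer to whole pages, program one entry per page, and return the bus address of the first entry plus the in-page offset:

/* Stand-alone model (not kernel code) of the mapping arithmetic in the
 * merged __sbus_iommu_map_page from patch 9.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long iommu_start = 0xfc000000;	/* hypothetical start of IOMMU window */
	unsigned long paddr = 0x04321a80;	/* hypothetical buffer physical address */
	unsigned long len = 0x2000;

	unsigned long off = paddr & (PAGE_SIZE - 1);
	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long pfn = paddr >> PAGE_SHIFT;
	unsigned long ioptex = 16;		/* hypothetical allocated slot */
	unsigned long busa0 = iommu_start + (ioptex << PAGE_SHIFT);

	for (unsigned long i = 0; i < npages; i++)
		printf("IOPTE %lu: bus %#lx -> pfn %#lx\n",
		       ioptex + i, busa0 + i * PAGE_SIZE, pfn + i);

	printf("returned dma address: %#lx\n", busa0 + off);
	return 0;
}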