ARM: dma-mapping: Always pass proper prot flags to iommu_map()

... otherwise it is impossible for the low-level IOMMU driver to
figure out which PTE flags should be used.

In __map_sg_chunk we can derive the flags from dma_data_direction.

In __iommu_create_mapping we should treat the memory as
DMA_BIDIRECTIONAL and pass both IOMMU_READ and IOMMU_WRITE to
iommu_map.

__iommu_create_mapping is used during dma_alloc_coherent (via
arm_iommu_alloc_attrs).  AFAIK dma_alloc_coherent is responsible for
allocation _and_ mapping.  I think this implies that access to the
mapped pages should be allowed.

Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Andreas Herrmann <andreas.herrmann@calxeda.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
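
For context on why the prot argument matters to the low-level driver: an
IOMMU driver's map callback typically translates the generic
IOMMU_READ/IOMMU_WRITE flags into permission bits of its hardware
page-table format. The sketch below is purely illustrative and not part of
this commit; example_iommu_map(), example_install_pte() and the
EXAMPLE_PTE_* bits are invented names, while IOMMU_READ, IOMMU_WRITE and
the prot parameter of iommu_map() are the real API.

#include <linux/iommu.h>	/* IOMMU_READ, IOMMU_WRITE, struct iommu_domain */

/* Invented permission bits of a made-up page-table format. */
#define EXAMPLE_PTE_READ	0x1
#define EXAMPLE_PTE_WRITE	0x2

/* Hypothetical helper that writes the hardware PTEs; definition omitted. */
static int example_install_pte(struct iommu_domain *domain, unsigned long iova,
			       phys_addr_t paddr, size_t size, u32 pte_flags);

/*
 * Hypothetical ->map callback: the prot argument is the only hint the
 * driver gets about whether the device may read, write, or both.
 */
static int example_iommu_map(struct iommu_domain *domain, unsigned long iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	u32 pte_flags = 0;

	if (prot & IOMMU_READ)
		pte_flags |= EXAMPLE_PTE_READ;
	if (prot & IOMMU_WRITE)
		pte_flags |= EXAMPLE_PTE_WRITE;

	return example_install_pte(domain, iova, paddr, size, pte_flags);
}

With prot == 0 a driver like this has nothing to derive the permission bits
from, which is why both __map_sg_chunk and __iommu_create_mapping now pass
real flags instead of 0.
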
Andreas Herrmann authored and Marek Szyprowski committed Oct 2, 2013
1 parent 15c03dd commit c9b2499
Showing 1 changed file with 28 additions and 15 deletions.
43 changes: 28 additions & 15 deletions arch/arm/mm/dma-mapping.c
@@ -1232,7 +1232,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 				break;
 
 		len = (j - i) << PAGE_SHIFT;
-		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		ret = iommu_map(mapping->domain, iova, phys, len,
+				IOMMU_READ|IOMMU_WRITE);
 		if (ret < 0)
 			goto fail;
 		iova += len;
@@ -1431,6 +1432,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 					 GFP_KERNEL);
 }
 
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+	int prot;
+
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		prot = IOMMU_READ | IOMMU_WRITE;
+		break;
+	case DMA_TO_DEVICE:
+		prot = IOMMU_READ;
+		break;
+	case DMA_FROM_DEVICE:
+		prot = IOMMU_WRITE;
+		break;
+	default:
+		prot = 0;
+	}
+
+	return prot;
+}
+
 /*
  * Map a part of the scatter-gather list into contiguous io address space
  */
@@ -1444,6 +1466,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 	int ret = 0;
 	unsigned int count;
 	struct scatterlist *s;
+	int prot;
 
 	size = PAGE_ALIGN(size);
 	*handle = DMA_ERROR_CODE;
@@ -1460,7 +1483,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 			!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
-		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		prot = __dma_direction_to_prot(dir);
+
+		ret = iommu_map(mapping->domain, iova, phys, len, prot);
 		if (ret < 0)
 			goto fail;
 		count += len >> PAGE_SHIFT;
@@ -1665,19 +1690,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
 
-	switch (dir) {
-	case DMA_BIDIRECTIONAL:
-		prot = IOMMU_READ | IOMMU_WRITE;
-		break;
-	case DMA_TO_DEVICE:
-		prot = IOMMU_READ;
-		break;
-	case DMA_FROM_DEVICE:
-		prot = IOMMU_WRITE;
-		break;
-	default:
-		prot = 0;
-	}
+	prot = __dma_direction_to_prot(dir);
 
 	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
 	if (ret < 0)
