xen/arm/arm64: introduce xen_arch_need_swiotlb
Introduce an arch-specific function to find out whether a particular DMA
mapping operation needs to bounce via the swiotlb buffer.

On ARM and ARM64, if the page involved is a foreign page and the device
is not coherent, we need to bounce because at unmap time we cannot
execute any required cache maintenance operations (we don't know how to
find the pfn from the mfn).

No change of behaviour for x86.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Stefano Stabellini authored and David Vrabel committed Dec 4, 2014
1 parent 5121872 commit a4dba13
Showing 4 changed files with 22 additions and 1 deletion.
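
For orientation, here is a minimal C sketch of the decision the diffs below implement, consolidating the map-path checks into one hypothetical helper. must_bounce() is not part of the commit; the real checks live inline in xen_swiotlb_map_page() and xen_swiotlb_map_sg_attrs(), and only the xen_arch_need_swiotlb() term is new.

/*
 * Sketch only: the commit adds the xen_arch_need_swiotlb() term; the other
 * checks already existed in drivers/xen/swiotlb-xen.c.
 */
static bool must_bounce(struct device *dev, phys_addr_t phys,
                        dma_addr_t dev_addr, size_t size)
{
        return swiotlb_force ||
               !dma_capable(dev, dev_addr, size) ||
               range_straddles_page_boundary(phys, size) ||
               /* New: foreign page (pfn != mfn) mapped by a non-coherent
                * device on ARM/ARM64; always false on x86. */
               xen_arch_need_swiotlb(dev, PFN_DOWN(phys), PFN_DOWN(dev_addr));
}

On x86 the new term is a constant false, so the fast path there is unchanged.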
arch/arm/include/asm/xen/page.h (4 additions, 0 deletions)

@@ -107,4 +107,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 #define xen_remap(cookie, size) ioremap_cache((cookie), (size))
 #define xen_unmap(cookie) iounmap((cookie))
 
+bool xen_arch_need_swiotlb(struct device *dev,
+                           unsigned long pfn,
+                           unsigned long mfn);
+
 #endif /* _ASM_ARM_XEN_PAGE_H */
arch/arm/xen/mm.c (7 additions, 0 deletions)

@@ -100,6 +100,13 @@ void __xen_dma_sync_single_for_device(struct device *hwdev,
 	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
 }
 
+bool xen_arch_need_swiotlb(struct device *dev,
+                           unsigned long pfn,
+                           unsigned long mfn)
+{
+	return ((pfn != mfn) && !is_device_dma_coherent(dev));
+}
+
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                  unsigned int address_bits,
                                  dma_addr_t *dma_handle)
arch/x86/include/asm/xen/page.h (7 additions, 0 deletions)

@@ -236,4 +236,11 @@ void make_lowmem_page_readwrite(void *vaddr);
 #define xen_remap(cookie, size) ioremap((cookie), (size));
 #define xen_unmap(cookie) iounmap((cookie))
 
+static inline bool xen_arch_need_swiotlb(struct device *dev,
+                                         unsigned long pfn,
+                                         unsigned long mfn)
+{
+	return false;
+}
+
 #endif /* _ASM_X86_XEN_PAGE_H */
drivers/xen/swiotlb-xen.c (4 additions, 1 deletion)

@@ -399,7 +399,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 * buffering it.
 	 */
 	if (dma_capable(dev, dev_addr, size) &&
-	    !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
+	    !range_straddles_page_boundary(phys, size) &&
+	    !xen_arch_need_swiotlb(dev, PFN_DOWN(phys), PFN_DOWN(dev_addr)) &&
+	    !swiotlb_force) {
 		/* we are not interested in the dma_addr returned by
 		 * xen_dma_map_page, only in the potential cache flushes executed
 		 * by the function. */
@@ -557,6 +559,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 		dma_addr_t dev_addr = xen_phys_to_bus(paddr);
 
 		if (swiotlb_force ||
+		    xen_arch_need_swiotlb(hwdev, PFN_DOWN(paddr), PFN_DOWN(dev_addr)) ||
 		    !dma_capable(hwdev, dev_addr, sg->length) ||
 		    range_straddles_page_boundary(paddr, sg->length)) {
 			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
