From 71ca46f0d7f02bf4c5df93fc9b31e185cb2c712f Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Tue, 6 Jan 2009 00:17:09 +0900 Subject: [PATCH] --- yaml --- r: 138910 b: refs/heads/master c: 5f812de63ce515265ebd84e932c762136924ab84 h: refs/heads/master v: v3 --- [refs] | 2 +- trunk/arch/ia64/dig/Makefile | 4 +- trunk/arch/ia64/dig/dig_vtd_iommu.c | 59 ++++++ trunk/arch/ia64/hp/common/hwsw_iommu.c | 165 +++++++++++++-- trunk/arch/ia64/hp/common/sba_iommu.c | 79 +++---- trunk/arch/ia64/include/asm/dma-mapping.h | 194 ++++++++---------- trunk/arch/ia64/include/asm/machvec.h | 102 ++++++++- trunk/arch/ia64/include/asm/machvec_dig_vtd.h | 20 ++ trunk/arch/ia64/include/asm/machvec_hpzx1.h | 23 ++- .../ia64/include/asm/machvec_hpzx1_swiotlb.h | 27 ++- trunk/arch/ia64/include/asm/machvec_sn2.h | 27 ++- trunk/arch/ia64/kernel/Makefile | 4 +- trunk/arch/ia64/kernel/dma-mapping.c | 13 -- trunk/arch/ia64/kernel/machvec.c | 8 +- trunk/arch/ia64/kernel/pci-dma.c | 46 ++--- trunk/arch/ia64/kernel/pci-swiotlb.c | 30 +-- trunk/arch/ia64/sn/pci/pci_dma.c | 99 ++++----- trunk/arch/x86/include/asm/device.h | 2 +- trunk/arch/x86/include/asm/dma-mapping.h | 149 +++++++++----- trunk/arch/x86/include/asm/iommu.h | 2 +- trunk/arch/x86/kernel/Makefile | 2 +- trunk/arch/x86/kernel/amd_iommu.c | 26 +-- trunk/arch/x86/kernel/pci-calgary_64.c | 38 ++-- trunk/arch/x86/kernel/pci-dma.c | 4 +- trunk/arch/x86/kernel/pci-gart_64.c | 34 ++- trunk/arch/x86/kernel/pci-nommu.c | 21 +- .../{pci-swiotlb.c => pci-swiotlb_64.c} | 19 +- trunk/drivers/pci/intel-iommu.c | 50 ++--- trunk/include/linux/dma-mapping.h | 48 ----- trunk/include/linux/intel-iommu.h | 7 + trunk/include/linux/swiotlb.h | 41 ++-- trunk/lib/swiotlb.c | 88 ++++---- 32 files changed, 839 insertions(+), 594 deletions(-) create mode 100644 trunk/arch/ia64/dig/dig_vtd_iommu.c delete mode 100644 trunk/arch/ia64/kernel/dma-mapping.c rename trunk/arch/x86/kernel/{pci-swiotlb.c => pci-swiotlb_64.c} (81%) diff --git a/[refs] b/[refs] index bb17d10aff0f..2605db36e641 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 7df4edb07cf54a4868b9a750424c0d2aa68f8d66 +refs/heads/master: 5f812de63ce515265ebd84e932c762136924ab84 diff --git a/trunk/arch/ia64/dig/Makefile b/trunk/arch/ia64/dig/Makefile index 2f7caddf093e..5c0283830bd6 100644 --- a/trunk/arch/ia64/dig/Makefile +++ b/trunk/arch/ia64/dig/Makefile @@ -7,8 +7,8 @@ obj-y := setup.o ifeq ($(CONFIG_DMAR), y) -obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o +obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o dig_vtd_iommu.o else obj-$(CONFIG_IA64_GENERIC) += machvec.o endif - +obj-$(CONFIG_IA64_DIG_VTD) += dig_vtd_iommu.o diff --git a/trunk/arch/ia64/dig/dig_vtd_iommu.c b/trunk/arch/ia64/dig/dig_vtd_iommu.c new file mode 100644 index 000000000000..1c8a079017a3 --- /dev/null +++ b/trunk/arch/ia64/dig/dig_vtd_iommu.c @@ -0,0 +1,59 @@ +#include +#include +#include +#include + +void * +vtd_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t flags) +{ + return intel_alloc_coherent(dev, size, dma_handle, flags); +} +EXPORT_SYMBOL_GPL(vtd_alloc_coherent); + +void +vtd_free_coherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle) +{ + intel_free_coherent(dev, size, vaddr, dma_handle); +} +EXPORT_SYMBOL_GPL(vtd_free_coherent); + +dma_addr_t +vtd_map_single_attrs(struct device *dev, void *addr, size_t size, + int dir, struct dma_attrs *attrs) +{ + return intel_map_single(dev, (phys_addr_t)addr, size, dir); +} +EXPORT_SYMBOL_GPL(vtd_map_single_attrs); + 
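/*
 * Illustrative only (not part of the patch): how a driver call reaches
 * the vtd_* shims once machvec_dig_vtd.h (further down) wires them into
 * the machine vector.  example_map() is hypothetical and assumes the
 * usual <linux/dma-mapping.h> / <linux/errno.h> context.
 */
static int example_map(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* dma_map_single -> platform_dma_map_single_attrs
	 *                -> vtd_map_single_attrs -> intel_map_single */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))	/* always 0 for VT-d here */
		return -EIO;

	/* ... device performs DMA ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}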
+void +vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, + int dir, struct dma_attrs *attrs) +{ + intel_unmap_single(dev, iova, size, dir); +} +EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs); + +int +vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, + int dir, struct dma_attrs *attrs) +{ + return intel_map_sg(dev, sglist, nents, dir); +} +EXPORT_SYMBOL_GPL(vtd_map_sg_attrs); + +void +vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, + int nents, int dir, struct dma_attrs *attrs) +{ + intel_unmap_sg(dev, sglist, nents, dir); +} +EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs); + +int +vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return 0; +} +EXPORT_SYMBOL_GPL(vtd_dma_mapping_error); diff --git a/trunk/arch/ia64/hp/common/hwsw_iommu.c b/trunk/arch/ia64/hp/common/hwsw_iommu.c index e4a80d82e3d8..2769dbfd03bf 100644 --- a/trunk/arch/ia64/hp/common/hwsw_iommu.c +++ b/trunk/arch/ia64/hp/common/hwsw_iommu.c @@ -13,34 +13,49 @@ */ #include -#include #include -#include -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops; +#include /* swiotlb declarations & definitions: */ extern int swiotlb_late_init_with_default_size (size_t size); +/* hwiommu declarations & definitions: */ + +extern ia64_mv_dma_alloc_coherent sba_alloc_coherent; +extern ia64_mv_dma_free_coherent sba_free_coherent; +extern ia64_mv_dma_map_single_attrs sba_map_single_attrs; +extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs; +extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs; +extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs; +extern ia64_mv_dma_supported sba_dma_supported; +extern ia64_mv_dma_mapping_error sba_dma_mapping_error; + +#define hwiommu_alloc_coherent sba_alloc_coherent +#define hwiommu_free_coherent sba_free_coherent +#define hwiommu_map_single_attrs sba_map_single_attrs +#define hwiommu_unmap_single_attrs sba_unmap_single_attrs +#define hwiommu_map_sg_attrs sba_map_sg_attrs +#define hwiommu_unmap_sg_attrs sba_unmap_sg_attrs +#define hwiommu_dma_supported sba_dma_supported +#define hwiommu_dma_mapping_error sba_dma_mapping_error +#define hwiommu_sync_single_for_cpu machvec_dma_sync_single +#define hwiommu_sync_sg_for_cpu machvec_dma_sync_sg +#define hwiommu_sync_single_for_device machvec_dma_sync_single +#define hwiommu_sync_sg_for_device machvec_dma_sync_sg + + /* * Note: we need to make the determination of whether or not to use * the sw I/O TLB based purely on the device structure. Anything else * would be unreliable or would be too intrusive. 
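 *
 * Concretely: sba_dma_supported() only demands that a mask cover the
 * low 32 bits, so a device with a narrower (say, 24-bit ISA-style)
 * mask fails the check and gets bounced through swiotlb.  A hedged
 * sketch of one wrapper built on that predicate follows;
 * hwsw_map_sketch() is illustrative only (it mirrors the real
 * hwsw_map_single_attrs() below and would have to follow the
 * use_swiotlb() definition to compile):
 */
static inline dma_addr_t
hwsw_map_sketch(struct device *dev, void *addr, size_t size, int dir)
{
	if (use_swiotlb(dev))
		return swiotlb_map_single_attrs(dev, addr, size, dir, NULL);
	return hwiommu_map_single_attrs(dev, addr, size, dir, NULL);
}

/*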
*/ -static inline int use_swiotlb(struct device *dev) +static inline int +use_swiotlb (struct device *dev) { - return dev && dev->dma_mask && - !sba_dma_ops.dma_supported(dev, *dev->dma_mask); + return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask); } -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) -{ - if (use_swiotlb(dev)) - return &swiotlb_dma_ops; - return &sba_dma_ops; -} -EXPORT_SYMBOL(hwsw_dma_get_ops); - void __init hwsw_init (void) { @@ -56,3 +71,125 @@ hwsw_init (void) #endif } } + +void * +hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags) +{ + if (use_swiotlb(dev)) + return swiotlb_alloc_coherent(dev, size, dma_handle, flags); + else + return hwiommu_alloc_coherent(dev, size, dma_handle, flags); +} + +void +hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) +{ + if (use_swiotlb(dev)) + swiotlb_free_coherent(dev, size, vaddr, dma_handle); + else + hwiommu_free_coherent(dev, size, vaddr, dma_handle); +} + +dma_addr_t +hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir, + struct dma_attrs *attrs) +{ + if (use_swiotlb(dev)) + return swiotlb_map_single_attrs(dev, addr, size, dir, attrs); + else + return hwiommu_map_single_attrs(dev, addr, size, dir, attrs); +} +EXPORT_SYMBOL(hwsw_map_single_attrs); + +void +hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, + int dir, struct dma_attrs *attrs) +{ + if (use_swiotlb(dev)) + return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs); + else + return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs); +} +EXPORT_SYMBOL(hwsw_unmap_single_attrs); + +int +hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, + int dir, struct dma_attrs *attrs) +{ + if (use_swiotlb(dev)) + return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs); + else + return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs); +} +EXPORT_SYMBOL(hwsw_map_sg_attrs); + +void +hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, + int dir, struct dma_attrs *attrs) +{ + if (use_swiotlb(dev)) + return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs); + else + return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs); +} +EXPORT_SYMBOL(hwsw_unmap_sg_attrs); + +void +hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir) +{ + if (use_swiotlb(dev)) + swiotlb_sync_single_for_cpu(dev, addr, size, dir); + else + hwiommu_sync_single_for_cpu(dev, addr, size, dir); +} + +void +hwsw_sync_sg_for_cpu (struct device *dev, struct scatterlist *sg, int nelems, int dir) +{ + if (use_swiotlb(dev)) + swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir); + else + hwiommu_sync_sg_for_cpu(dev, sg, nelems, dir); +} + +void +hwsw_sync_single_for_device (struct device *dev, dma_addr_t addr, size_t size, int dir) +{ + if (use_swiotlb(dev)) + swiotlb_sync_single_for_device(dev, addr, size, dir); + else + hwiommu_sync_single_for_device(dev, addr, size, dir); +} + +void +hwsw_sync_sg_for_device (struct device *dev, struct scatterlist *sg, int nelems, int dir) +{ + if (use_swiotlb(dev)) + swiotlb_sync_sg_for_device(dev, sg, nelems, dir); + else + hwiommu_sync_sg_for_device(dev, sg, nelems, dir); +} + +int +hwsw_dma_supported (struct device *dev, u64 mask) +{ + if (hwiommu_dma_supported(dev, mask)) + return 1; + return swiotlb_dma_supported(dev, mask); +} + +int +hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return 
hwiommu_dma_mapping_error(dev, dma_addr) || + swiotlb_dma_mapping_error(dev, dma_addr); +} + +EXPORT_SYMBOL(hwsw_dma_mapping_error); +EXPORT_SYMBOL(hwsw_dma_supported); +EXPORT_SYMBOL(hwsw_alloc_coherent); +EXPORT_SYMBOL(hwsw_free_coherent); +EXPORT_SYMBOL(hwsw_sync_single_for_cpu); +EXPORT_SYMBOL(hwsw_sync_single_for_device); +EXPORT_SYMBOL(hwsw_sync_sg_for_cpu); +EXPORT_SYMBOL(hwsw_sync_sg_for_device); diff --git a/trunk/arch/ia64/hp/common/sba_iommu.c b/trunk/arch/ia64/hp/common/sba_iommu.c index 56ceb68eb99d..6d5e6c5630e3 100644 --- a/trunk/arch/ia64/hp/common/sba_iommu.c +++ b/trunk/arch/ia64/hp/common/sba_iommu.c @@ -36,7 +36,6 @@ #include /* hweight64() */ #include #include -#include #include /* ia64_get_itc() */ #include @@ -909,13 +908,11 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) * * See Documentation/PCI/PCI-DMA-mapping.txt */ -static dma_addr_t sba_map_page(struct device *dev, struct page *page, - unsigned long poff, size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) +dma_addr_t +sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir, + struct dma_attrs *attrs) { struct ioc *ioc; - void *addr = page_address(page) + poff; dma_addr_t iovp; dma_addr_t offset; u64 *pdir_start; @@ -993,14 +990,7 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page, #endif return SBA_IOVA(ioc, iovp, offset); } - -static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - return sba_map_page(dev, virt_to_page(addr), - (unsigned long)addr & ~PAGE_MASK, size, dir, attrs); -} +EXPORT_SYMBOL(sba_map_single_attrs); #ifdef ENABLE_MARK_CLEAN static SBA_INLINE void @@ -1036,8 +1026,8 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) * * See Documentation/PCI/PCI-DMA-mapping.txt */ -static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) +void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, + int dir, struct dma_attrs *attrs) { struct ioc *ioc; #if DELAYED_RESOURCE_CNT > 0 @@ -1104,12 +1094,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, spin_unlock_irqrestore(&ioc->res_lock, flags); #endif /* DELAYED_RESOURCE_CNT == 0 */ } - -void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) -{ - sba_unmap_page(dev, iova, size, dir, attrs); -} +EXPORT_SYMBOL(sba_unmap_single_attrs); /** * sba_alloc_coherent - allocate/map shared mem for DMA @@ -1119,7 +1104,7 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, * * See Documentation/PCI/PCI-DMA-mapping.txt */ -static void * +void * sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags) { struct ioc *ioc; @@ -1182,8 +1167,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp * * See Documentation/PCI/PCI-DMA-mapping.txt */ -static void sba_free_coherent (struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) +void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL); free_pages((unsigned long) vaddr, get_order(size)); @@ -1438,9 +1422,8 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev, * * See Documentation/PCI/PCI-DMA-mapping.txt */ -static int 
sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, - int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) +int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, + int dir, struct dma_attrs *attrs) { struct ioc *ioc; int coalesced, filled = 0; @@ -1519,6 +1502,7 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, return filled; } +EXPORT_SYMBOL(sba_map_sg_attrs); /** * sba_unmap_sg_attrs - unmap Scatter/Gather list @@ -1530,9 +1514,8 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, * * See Documentation/PCI/PCI-DMA-mapping.txt */ -static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, - int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) +void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, + int nents, int dir, struct dma_attrs *attrs) { #ifdef ASSERT_PDIR_SANITY struct ioc *ioc; @@ -1568,6 +1551,7 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, #endif } +EXPORT_SYMBOL(sba_unmap_sg_attrs); /************************************************************** * @@ -2080,8 +2064,6 @@ static struct acpi_driver acpi_sba_ioc_driver = { }, }; -extern struct dma_map_ops swiotlb_dma_ops; - static int __init sba_init(void) { @@ -2095,7 +2077,6 @@ sba_init(void) * a successful kdump kernel boot is to use the swiotlb. */ if (is_kdump_kernel()) { - dma_ops = &swiotlb_dma_ops; if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0) panic("Unable to initialize software I/O TLB:" " Try machvec=dig boot option"); @@ -2111,7 +2092,6 @@ sba_init(void) * If we didn't find something sba_iommu can claim, we * need to setup the swiotlb and switch to the dig machvec. */ - dma_ops = &swiotlb_dma_ops; if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0) panic("Unable to find SBA IOMMU or initialize " "software I/O TLB: Try machvec=dig boot option"); @@ -2158,13 +2138,15 @@ nosbagart(char *str) return 1; } -static int sba_dma_supported (struct device *dev, u64 mask) +int +sba_dma_supported (struct device *dev, u64 mask) { /* make sure it's at least 32bit capable */ return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL); } -static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +int +sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; } @@ -2194,22 +2176,7 @@ sba_page_override(char *str) __setup("sbapagesize=",sba_page_override); -struct dma_map_ops sba_dma_ops = { - .alloc_coherent = sba_alloc_coherent, - .free_coherent = sba_free_coherent, - .map_page = sba_map_page, - .unmap_page = sba_unmap_page, - .map_sg = sba_map_sg_attrs, - .unmap_sg = sba_unmap_sg_attrs, - .sync_single_for_cpu = machvec_dma_sync_single, - .sync_sg_for_cpu = machvec_dma_sync_sg, - .sync_single_for_device = machvec_dma_sync_single, - .sync_sg_for_device = machvec_dma_sync_sg, - .dma_supported = sba_dma_supported, - .mapping_error = sba_dma_mapping_error, -}; - -void sba_dma_init(void) -{ - dma_ops = &sba_dma_ops; -} +EXPORT_SYMBOL(sba_dma_mapping_error); +EXPORT_SYMBOL(sba_dma_supported); +EXPORT_SYMBOL(sba_alloc_coherent); +EXPORT_SYMBOL(sba_free_coherent); diff --git a/trunk/arch/ia64/include/asm/dma-mapping.h b/trunk/arch/ia64/include/asm/dma-mapping.h index 36c0009dbece..1f912d927585 100644 --- a/trunk/arch/ia64/include/asm/dma-mapping.h +++ b/trunk/arch/ia64/include/asm/dma-mapping.h @@ -11,128 +11,99 @@ #define ARCH_HAS_DMA_GET_REQUIRED_MASK -extern struct dma_map_ops *dma_ops; +struct 
dma_mapping_ops { + int (*mapping_error)(struct device *dev, + dma_addr_t dma_addr); + void* (*alloc_coherent)(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp); + void (*free_coherent)(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); + dma_addr_t (*map_single)(struct device *hwdev, unsigned long ptr, + size_t size, int direction); + void (*unmap_single)(struct device *dev, dma_addr_t addr, + size_t size, int direction); + void (*sync_single_for_cpu)(struct device *hwdev, + dma_addr_t dma_handle, size_t size, + int direction); + void (*sync_single_for_device)(struct device *hwdev, + dma_addr_t dma_handle, size_t size, + int direction); + void (*sync_single_range_for_cpu)(struct device *hwdev, + dma_addr_t dma_handle, unsigned long offset, + size_t size, int direction); + void (*sync_single_range_for_device)(struct device *hwdev, + dma_addr_t dma_handle, unsigned long offset, + size_t size, int direction); + void (*sync_sg_for_cpu)(struct device *hwdev, + struct scatterlist *sg, int nelems, + int direction); + void (*sync_sg_for_device)(struct device *hwdev, + struct scatterlist *sg, int nelems, + int direction); + int (*map_sg)(struct device *hwdev, struct scatterlist *sg, + int nents, int direction); + void (*unmap_sg)(struct device *hwdev, + struct scatterlist *sg, int nents, + int direction); + int (*dma_supported_op)(struct device *hwdev, u64 mask); + int is_phys; +}; + +extern struct dma_mapping_ops *dma_ops; extern struct ia64_machine_vector ia64_mv; extern void set_iommu_machvec(void); -extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t, - enum dma_data_direction); -extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, - enum dma_data_direction); +#define dma_alloc_coherent(dev, size, handle, gfp) \ + platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA) -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *daddr, gfp_t gfp) +/* coherent mem. 
is cheap */ +static inline void * +dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t flag) { - struct dma_map_ops *ops = platform_dma_get_ops(dev); - return ops->alloc_coherent(dev, size, daddr, gfp); + return dma_alloc_coherent(dev, size, dma_handle, flag); } - -static inline void dma_free_coherent(struct device *dev, size_t size, - void *caddr, dma_addr_t daddr) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - ops->free_coherent(dev, size, caddr, daddr); -} - -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - -static inline dma_addr_t dma_map_single_attrs(struct device *dev, - void *caddr, size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - return ops->map_page(dev, virt_to_page(caddr), - (unsigned long)caddr & ~PAGE_MASK, size, - dir, attrs); -} - -static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr, - size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - ops->unmap_page(dev, daddr, size, dir, attrs); -} - -#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) -#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) - -static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - return ops->map_sg(dev, sgl, nents, dir, attrs); -} - -static inline void dma_unmap_sg_attrs(struct device *dev, - struct scatterlist *sgl, int nents, - enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - ops->unmap_sg(dev, sgl, nents, dir, attrs); -} - -#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) -#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) - -static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr, - size_t size, - enum dma_data_direction dir) +#define dma_free_coherent platform_dma_free_coherent +static inline void +dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_handle) { - struct dma_map_ops *ops = platform_dma_get_ops(dev); - ops->sync_single_for_cpu(dev, daddr, size, dir); + dma_free_coherent(dev, size, cpu_addr, dma_handle); } - -static inline void dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sgl, - int nents, enum dma_data_direction dir) +#define dma_map_single_attrs platform_dma_map_single_attrs +static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, + size_t size, int dir) { - struct dma_map_ops *ops = platform_dma_get_ops(dev); - ops->sync_sg_for_cpu(dev, sgl, nents, dir); + return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL); } - -static inline void dma_sync_single_for_device(struct device *dev, - dma_addr_t daddr, - size_t size, - enum dma_data_direction dir) +#define dma_map_sg_attrs platform_dma_map_sg_attrs +static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, int dir) { - struct dma_map_ops *ops = platform_dma_get_ops(dev); - ops->sync_single_for_device(dev, daddr, size, dir); + return dma_map_sg_attrs(dev, sgl, nents, dir, NULL); } - -static inline void dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sgl, - int nents, 
- enum dma_data_direction dir) +#define dma_unmap_single_attrs platform_dma_unmap_single_attrs +static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr, + size_t size, int dir) { - struct dma_map_ops *ops = platform_dma_get_ops(dev); - ops->sync_sg_for_device(dev, sgl, nents, dir); + return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL); } - -static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - return ops->mapping_error(dev, daddr); -} - -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, - size_t offset, size_t size, - enum dma_data_direction dir) +#define dma_unmap_sg_attrs platform_dma_unmap_sg_attrs +static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, int dir) { - struct dma_map_ops *ops = platform_dma_get_ops(dev); - return ops->map_page(dev, page, offset, size, dir, NULL); + return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL); } +#define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu +#define dma_sync_sg_for_cpu platform_dma_sync_sg_for_cpu +#define dma_sync_single_for_device platform_dma_sync_single_for_device +#define dma_sync_sg_for_device platform_dma_sync_sg_for_device +#define dma_mapping_error platform_dma_mapping_error -static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) -{ - dma_unmap_single(dev, addr, size, dir); -} +#define dma_map_page(dev, pg, off, size, dir) \ + dma_map_single(dev, page_address(pg) + (off), (size), (dir)) +#define dma_unmap_page(dev, dma_addr, size, dir) \ + dma_unmap_single(dev, dma_addr, size, dir) /* * Rest of this file is part of the "Advanced DMA API". Use at your own risk. @@ -144,11 +115,7 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \ dma_sync_single_for_device(dev, dma_handle, size, dir) -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - return ops->dma_supported(dev, mask); -} +#define dma_supported platform_dma_supported static inline int dma_set_mask (struct device *dev, u64 mask) @@ -174,4 +141,11 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size, #define dma_is_consistent(d, h) (1) /* all we do is coherent memory... 
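 *
 * so dma_cache_sync() needs nothing beyond a memory barrier, and
 * dma_is_consistent() can be a constant 1; likewise get_dma_ops()
 * below ignores the device and hands back the single global dma_ops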
*/ +static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) +{ + return dma_ops; +} + + + #endif /* _ASM_IA64_DMA_MAPPING_H */ diff --git a/trunk/arch/ia64/include/asm/machvec.h b/trunk/arch/ia64/include/asm/machvec.h index 367d299d9938..fe87b2121707 100644 --- a/trunk/arch/ia64/include/asm/machvec.h +++ b/trunk/arch/ia64/include/asm/machvec.h @@ -11,6 +11,7 @@ #define _ASM_IA64_MACHVEC_H #include +#include /* forward declarations: */ struct device; @@ -44,8 +45,24 @@ typedef void ia64_mv_kernel_launch_event_t(void); /* DMA-mapping interface: */ typedef void ia64_mv_dma_init (void); +typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t); +typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t); +typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int); +typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int); +typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int); +typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int); +typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int); +typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int); +typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int); +typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int); +typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr); +typedef int ia64_mv_dma_supported (struct device *, u64); + +typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *); +typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *); +typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *); +typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *); typedef u64 ia64_mv_dma_get_required_mask (struct device *); -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *); /* * WARNING: The legacy I/O space is _architected_. 
Platforms are @@ -97,6 +114,8 @@ machvec_noop_bus (struct pci_bus *bus) extern void machvec_setup (char **); extern void machvec_timer_interrupt (int, void *); +extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int); +extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int); extern void machvec_tlb_migrate_finish (struct mm_struct *); # if defined (CONFIG_IA64_HP_SIM) @@ -129,8 +148,19 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *); # define platform_global_tlb_purge ia64_mv.global_tlb_purge # define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish # define platform_dma_init ia64_mv.dma_init +# define platform_dma_alloc_coherent ia64_mv.dma_alloc_coherent +# define platform_dma_free_coherent ia64_mv.dma_free_coherent +# define platform_dma_map_single_attrs ia64_mv.dma_map_single_attrs +# define platform_dma_unmap_single_attrs ia64_mv.dma_unmap_single_attrs +# define platform_dma_map_sg_attrs ia64_mv.dma_map_sg_attrs +# define platform_dma_unmap_sg_attrs ia64_mv.dma_unmap_sg_attrs +# define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu +# define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu +# define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device +# define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device +# define platform_dma_mapping_error ia64_mv.dma_mapping_error +# define platform_dma_supported ia64_mv.dma_supported # define platform_dma_get_required_mask ia64_mv.dma_get_required_mask -# define platform_dma_get_ops ia64_mv.dma_get_ops # define platform_irq_to_vector ia64_mv.irq_to_vector # define platform_local_vector_to_irq ia64_mv.local_vector_to_irq # define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem @@ -173,8 +203,19 @@ struct ia64_machine_vector { ia64_mv_global_tlb_purge_t *global_tlb_purge; ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish; ia64_mv_dma_init *dma_init; + ia64_mv_dma_alloc_coherent *dma_alloc_coherent; + ia64_mv_dma_free_coherent *dma_free_coherent; + ia64_mv_dma_map_single_attrs *dma_map_single_attrs; + ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs; + ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs; + ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs; + ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu; + ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu; + ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device; + ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device; + ia64_mv_dma_mapping_error *dma_mapping_error; + ia64_mv_dma_supported *dma_supported; ia64_mv_dma_get_required_mask *dma_get_required_mask; - ia64_mv_dma_get_ops *dma_get_ops; ia64_mv_irq_to_vector *irq_to_vector; ia64_mv_local_vector_to_irq *local_vector_to_irq; ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem; @@ -213,8 +254,19 @@ struct ia64_machine_vector { platform_global_tlb_purge, \ platform_tlb_migrate_finish, \ platform_dma_init, \ + platform_dma_alloc_coherent, \ + platform_dma_free_coherent, \ + platform_dma_map_single_attrs, \ + platform_dma_unmap_single_attrs, \ + platform_dma_map_sg_attrs, \ + platform_dma_unmap_sg_attrs, \ + platform_dma_sync_single_for_cpu, \ + platform_dma_sync_sg_for_cpu, \ + platform_dma_sync_single_for_device, \ + platform_dma_sync_sg_for_device, \ + platform_dma_mapping_error, \ + platform_dma_supported, \ platform_dma_get_required_mask, \ - platform_dma_get_ops, \ platform_irq_to_vector, \ platform_local_vector_to_irq, \ platform_pci_get_legacy_mem, \ @@ -250,9 +302,6 @@ extern void 
machvec_init_from_cmdline(const char *cmdline); # error Unknown configuration. Update arch/ia64/include/asm/machvec.h. # endif /* CONFIG_IA64_GENERIC */ -extern void swiotlb_dma_init(void); -extern struct dma_map_ops *dma_get_ops(struct device *); - /* * Define default versions so we can extend machvec for new platforms without having * to update the machvec files for all existing platforms. @@ -283,10 +332,43 @@ extern struct dma_map_ops *dma_get_ops(struct device *); # define platform_kernel_launch_event machvec_noop #endif #ifndef platform_dma_init -# define platform_dma_init swiotlb_dma_init +# define platform_dma_init swiotlb_init +#endif +#ifndef platform_dma_alloc_coherent +# define platform_dma_alloc_coherent swiotlb_alloc_coherent +#endif +#ifndef platform_dma_free_coherent +# define platform_dma_free_coherent swiotlb_free_coherent +#endif +#ifndef platform_dma_map_single_attrs +# define platform_dma_map_single_attrs swiotlb_map_single_attrs +#endif +#ifndef platform_dma_unmap_single_attrs +# define platform_dma_unmap_single_attrs swiotlb_unmap_single_attrs +#endif +#ifndef platform_dma_map_sg_attrs +# define platform_dma_map_sg_attrs swiotlb_map_sg_attrs +#endif +#ifndef platform_dma_unmap_sg_attrs +# define platform_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs +#endif +#ifndef platform_dma_sync_single_for_cpu +# define platform_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu +#endif +#ifndef platform_dma_sync_sg_for_cpu +# define platform_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu +#endif +#ifndef platform_dma_sync_single_for_device +# define platform_dma_sync_single_for_device swiotlb_sync_single_for_device +#endif +#ifndef platform_dma_sync_sg_for_device +# define platform_dma_sync_sg_for_device swiotlb_sync_sg_for_device +#endif +#ifndef platform_dma_mapping_error +# define platform_dma_mapping_error swiotlb_dma_mapping_error #endif -#ifndef platform_dma_get_ops -# define platform_dma_get_ops dma_get_ops +#ifndef platform_dma_supported +# define platform_dma_supported swiotlb_dma_supported #endif #ifndef platform_dma_get_required_mask # define platform_dma_get_required_mask ia64_dma_get_required_mask diff --git a/trunk/arch/ia64/include/asm/machvec_dig_vtd.h b/trunk/arch/ia64/include/asm/machvec_dig_vtd.h index 6ab1de5c45ef..3400b561e711 100644 --- a/trunk/arch/ia64/include/asm/machvec_dig_vtd.h +++ b/trunk/arch/ia64/include/asm/machvec_dig_vtd.h @@ -2,6 +2,14 @@ #define _ASM_IA64_MACHVEC_DIG_VTD_h extern ia64_mv_setup_t dig_setup; +extern ia64_mv_dma_alloc_coherent vtd_alloc_coherent; +extern ia64_mv_dma_free_coherent vtd_free_coherent; +extern ia64_mv_dma_map_single_attrs vtd_map_single_attrs; +extern ia64_mv_dma_unmap_single_attrs vtd_unmap_single_attrs; +extern ia64_mv_dma_map_sg_attrs vtd_map_sg_attrs; +extern ia64_mv_dma_unmap_sg_attrs vtd_unmap_sg_attrs; +extern ia64_mv_dma_supported iommu_dma_supported; +extern ia64_mv_dma_mapping_error vtd_dma_mapping_error; extern ia64_mv_dma_init pci_iommu_alloc; /* @@ -14,5 +22,17 @@ extern ia64_mv_dma_init pci_iommu_alloc; #define platform_name "dig_vtd" #define platform_setup dig_setup #define platform_dma_init pci_iommu_alloc +#define platform_dma_alloc_coherent vtd_alloc_coherent +#define platform_dma_free_coherent vtd_free_coherent +#define platform_dma_map_single_attrs vtd_map_single_attrs +#define platform_dma_unmap_single_attrs vtd_unmap_single_attrs +#define platform_dma_map_sg_attrs vtd_map_sg_attrs +#define platform_dma_unmap_sg_attrs vtd_unmap_sg_attrs +#define platform_dma_sync_single_for_cpu machvec_dma_sync_single 
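/*
 * VT-d DMA is cache-coherent on ia64, so the sync hooks here and in
 * the defines that follow only need the mb() stubs
 * machvec_dma_sync_single/_sg.
 */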
+#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg +#define platform_dma_sync_single_for_device machvec_dma_sync_single +#define platform_dma_sync_sg_for_device machvec_dma_sync_sg +#define platform_dma_supported iommu_dma_supported +#define platform_dma_mapping_error vtd_dma_mapping_error #endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */ diff --git a/trunk/arch/ia64/include/asm/machvec_hpzx1.h b/trunk/arch/ia64/include/asm/machvec_hpzx1.h index 3bd83d78a412..2f57f5144b9f 100644 --- a/trunk/arch/ia64/include/asm/machvec_hpzx1.h +++ b/trunk/arch/ia64/include/asm/machvec_hpzx1.h @@ -2,7 +2,14 @@ #define _ASM_IA64_MACHVEC_HPZX1_h extern ia64_mv_setup_t dig_setup; -extern ia64_mv_dma_init sba_dma_init; +extern ia64_mv_dma_alloc_coherent sba_alloc_coherent; +extern ia64_mv_dma_free_coherent sba_free_coherent; +extern ia64_mv_dma_map_single_attrs sba_map_single_attrs; +extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs; +extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs; +extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs; +extern ia64_mv_dma_supported sba_dma_supported; +extern ia64_mv_dma_mapping_error sba_dma_mapping_error; /* * This stuff has dual use! @@ -13,6 +20,18 @@ extern ia64_mv_dma_init sba_dma_init; */ #define platform_name "hpzx1" #define platform_setup dig_setup -#define platform_dma_init sba_dma_init +#define platform_dma_init machvec_noop +#define platform_dma_alloc_coherent sba_alloc_coherent +#define platform_dma_free_coherent sba_free_coherent +#define platform_dma_map_single_attrs sba_map_single_attrs +#define platform_dma_unmap_single_attrs sba_unmap_single_attrs +#define platform_dma_map_sg_attrs sba_map_sg_attrs +#define platform_dma_unmap_sg_attrs sba_unmap_sg_attrs +#define platform_dma_sync_single_for_cpu machvec_dma_sync_single +#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg +#define platform_dma_sync_single_for_device machvec_dma_sync_single +#define platform_dma_sync_sg_for_device machvec_dma_sync_sg +#define platform_dma_supported sba_dma_supported +#define platform_dma_mapping_error sba_dma_mapping_error #endif /* _ASM_IA64_MACHVEC_HPZX1_h */ diff --git a/trunk/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h b/trunk/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h index 1091ac39740c..a842cdda827b 100644 --- a/trunk/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h +++ b/trunk/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h @@ -2,7 +2,18 @@ #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h extern ia64_mv_setup_t dig_setup; -extern ia64_mv_dma_get_ops hwsw_dma_get_ops; +extern ia64_mv_dma_alloc_coherent hwsw_alloc_coherent; +extern ia64_mv_dma_free_coherent hwsw_free_coherent; +extern ia64_mv_dma_map_single_attrs hwsw_map_single_attrs; +extern ia64_mv_dma_unmap_single_attrs hwsw_unmap_single_attrs; +extern ia64_mv_dma_map_sg_attrs hwsw_map_sg_attrs; +extern ia64_mv_dma_unmap_sg_attrs hwsw_unmap_sg_attrs; +extern ia64_mv_dma_supported hwsw_dma_supported; +extern ia64_mv_dma_mapping_error hwsw_dma_mapping_error; +extern ia64_mv_dma_sync_single_for_cpu hwsw_sync_single_for_cpu; +extern ia64_mv_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu; +extern ia64_mv_dma_sync_single_for_device hwsw_sync_single_for_device; +extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device; /* * This stuff has dual use! @@ -12,8 +23,20 @@ extern ia64_mv_dma_get_ops hwsw_dma_get_ops; * the macros are used directly. 
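 *
 * Every platform_dma_* hook in this header resolves to an hwsw_*
 * dispatcher from arch/ia64/hp/common/hwsw_iommu.c, which re-checks
 * use_swiotlb() per device and forwards to either the sba hardware
 * IOMMU or swiotlb.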
*/ #define platform_name "hpzx1_swiotlb" + #define platform_setup dig_setup #define platform_dma_init machvec_noop -#define platform_dma_get_ops hwsw_dma_get_ops +#define platform_dma_alloc_coherent hwsw_alloc_coherent +#define platform_dma_free_coherent hwsw_free_coherent +#define platform_dma_map_single_attrs hwsw_map_single_attrs +#define platform_dma_unmap_single_attrs hwsw_unmap_single_attrs +#define platform_dma_map_sg_attrs hwsw_map_sg_attrs +#define platform_dma_unmap_sg_attrs hwsw_unmap_sg_attrs +#define platform_dma_supported hwsw_dma_supported +#define platform_dma_mapping_error hwsw_dma_mapping_error +#define platform_dma_sync_single_for_cpu hwsw_sync_single_for_cpu +#define platform_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu +#define platform_dma_sync_single_for_device hwsw_sync_single_for_device +#define platform_dma_sync_sg_for_device hwsw_sync_sg_for_device #endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */ diff --git a/trunk/arch/ia64/include/asm/machvec_sn2.h b/trunk/arch/ia64/include/asm/machvec_sn2.h index f061a30aac42..f1a6e0d6dfa5 100644 --- a/trunk/arch/ia64/include/asm/machvec_sn2.h +++ b/trunk/arch/ia64/include/asm/machvec_sn2.h @@ -55,8 +55,19 @@ extern ia64_mv_readb_t __sn_readb_relaxed; extern ia64_mv_readw_t __sn_readw_relaxed; extern ia64_mv_readl_t __sn_readl_relaxed; extern ia64_mv_readq_t __sn_readq_relaxed; +extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent; +extern ia64_mv_dma_free_coherent sn_dma_free_coherent; +extern ia64_mv_dma_map_single_attrs sn_dma_map_single_attrs; +extern ia64_mv_dma_unmap_single_attrs sn_dma_unmap_single_attrs; +extern ia64_mv_dma_map_sg_attrs sn_dma_map_sg_attrs; +extern ia64_mv_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs; +extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu; +extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu; +extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device; +extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device; +extern ia64_mv_dma_mapping_error sn_dma_mapping_error; +extern ia64_mv_dma_supported sn_dma_supported; extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask; -extern ia64_mv_dma_init sn_dma_init; extern ia64_mv_migrate_t sn_migrate; extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event; extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq; @@ -100,8 +111,20 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus; #define platform_pci_get_legacy_mem sn_pci_get_legacy_mem #define platform_pci_legacy_read sn_pci_legacy_read #define platform_pci_legacy_write sn_pci_legacy_write +#define platform_dma_init machvec_noop +#define platform_dma_alloc_coherent sn_dma_alloc_coherent +#define platform_dma_free_coherent sn_dma_free_coherent +#define platform_dma_map_single_attrs sn_dma_map_single_attrs +#define platform_dma_unmap_single_attrs sn_dma_unmap_single_attrs +#define platform_dma_map_sg_attrs sn_dma_map_sg_attrs +#define platform_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs +#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu +#define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu +#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device +#define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device +#define platform_dma_mapping_error sn_dma_mapping_error +#define platform_dma_supported sn_dma_supported #define platform_dma_get_required_mask sn_dma_get_required_mask -#define platform_dma_init sn_dma_init #define platform_migrate sn_migrate #define platform_kernel_launch_event sn_kernel_launch_event #ifdef 
CONFIG_PCI_MSI diff --git a/trunk/arch/ia64/kernel/Makefile b/trunk/arch/ia64/kernel/Makefile index f2778f2c4fd9..c381ea954892 100644 --- a/trunk/arch/ia64/kernel/Makefile +++ b/trunk/arch/ia64/kernel/Makefile @@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ - unwind.o mca.o mca_asm.o topology.o dma-mapping.o + unwind.o mca.o mca_asm.o topology.o obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o @@ -43,7 +43,9 @@ ifneq ($(CONFIG_IA64_ESI),) obj-y += esi_stub.o # must be in kernel proper endif obj-$(CONFIG_DMAR) += pci-dma.o +ifeq ($(CONFIG_DMAR), y) obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o +endif # The gate DSO image is built using a special linker script. targets += gate.so gate-syms.o diff --git a/trunk/arch/ia64/kernel/dma-mapping.c b/trunk/arch/ia64/kernel/dma-mapping.c deleted file mode 100644 index 086a2aeb0404..000000000000 --- a/trunk/arch/ia64/kernel/dma-mapping.c +++ /dev/null @@ -1,13 +0,0 @@ -#include - -/* Set this to 1 if there is a HW IOMMU in the system */ -int iommu_detected __read_mostly; - -struct dma_map_ops *dma_ops; -EXPORT_SYMBOL(dma_ops); - -struct dma_map_ops *dma_get_ops(struct device *dev) -{ - return dma_ops; -} -EXPORT_SYMBOL(dma_get_ops); diff --git a/trunk/arch/ia64/kernel/machvec.c b/trunk/arch/ia64/kernel/machvec.c index d41a40ef80c0..7ccb228ceedc 100644 --- a/trunk/arch/ia64/kernel/machvec.c +++ b/trunk/arch/ia64/kernel/machvec.c @@ -1,5 +1,5 @@ #include -#include + #include #include @@ -75,16 +75,14 @@ machvec_timer_interrupt (int irq, void *dev_id) EXPORT_SYMBOL(machvec_timer_interrupt); void -machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size, - enum dma_data_direction dir) +machvec_dma_sync_single (struct device *hwdev, dma_addr_t dma_handle, size_t size, int dir) { mb(); } EXPORT_SYMBOL(machvec_dma_sync_single); void -machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n, - enum dma_data_direction dir) +machvec_dma_sync_sg (struct device *hwdev, struct scatterlist *sg, int n, int dir) { mb(); } diff --git a/trunk/arch/ia64/kernel/pci-dma.c b/trunk/arch/ia64/kernel/pci-dma.c index e4cb443bb988..d0ada067a4af 100644 --- a/trunk/arch/ia64/kernel/pci-dma.c +++ b/trunk/arch/ia64/kernel/pci-dma.c @@ -32,6 +32,9 @@ int force_iommu __read_mostly = 1; int force_iommu __read_mostly; #endif +/* Set this to 1 if there is a HW IOMMU in the system */ +int iommu_detected __read_mostly; + /* Dummy device used for NULL arguments (normally ISA). Better would be probably a smaller DMA mask, but this is bug-to-bug compatible to i386. 
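 *
 * (Calls that pass a NULL device get this stand-in, so the mask checks
 * in iommu_dma_supported() below still have a dma_mask to test.)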
*/ @@ -41,7 +44,18 @@ struct device fallback_dev = { .dma_mask = &fallback_dev.coherent_dma_mask, }; -extern struct dma_map_ops intel_dma_ops; +void __init pci_iommu_alloc(void) +{ + /* + * The order of these functions is important for + * fall-back/fail-over reasons + */ + detect_intel_iommu(); + +#ifdef CONFIG_SWIOTLB + pci_swiotlb_init(); +#endif +} static int __init pci_iommu_init(void) { @@ -65,12 +79,15 @@ iommu_dma_init(void) return; } +struct dma_mapping_ops *dma_ops; +EXPORT_SYMBOL(dma_ops); + int iommu_dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = platform_dma_get_ops(dev); + struct dma_mapping_ops *ops = get_dma_ops(dev); - if (ops->dma_supported) - return ops->dma_supported(dev, mask); + if (ops->dma_supported_op) + return ops->dma_supported_op(dev, mask); /* Copied from i386. Doesn't make much sense, because it will only work for pci_alloc_coherent. @@ -99,25 +116,4 @@ int iommu_dma_supported(struct device *dev, u64 mask) } EXPORT_SYMBOL(iommu_dma_supported); -void __init pci_iommu_alloc(void) -{ - dma_ops = &intel_dma_ops; - - dma_ops->sync_single_for_cpu = machvec_dma_sync_single; - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg; - dma_ops->sync_single_for_device = machvec_dma_sync_single; - dma_ops->sync_sg_for_device = machvec_dma_sync_sg; - dma_ops->dma_supported = iommu_dma_supported; - - /* - * The order of these functions is important for - * fall-back/fail-over reasons - */ - detect_intel_iommu(); - -#ifdef CONFIG_SWIOTLB - pci_swiotlb_init(); -#endif -} - #endif diff --git a/trunk/arch/ia64/kernel/pci-swiotlb.c b/trunk/arch/ia64/kernel/pci-swiotlb.c index 573f02c39a00..16c50516dbc1 100644 --- a/trunk/arch/ia64/kernel/pci-swiotlb.c +++ b/trunk/arch/ia64/kernel/pci-swiotlb.c @@ -13,37 +13,23 @@ int swiotlb __read_mostly; EXPORT_SYMBOL(swiotlb); -static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) -{ - if (dev->coherent_dma_mask != DMA_64BIT_MASK) - gfp |= GFP_DMA; - return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); -} - -struct dma_map_ops swiotlb_dma_ops = { - .alloc_coherent = ia64_swiotlb_alloc_coherent, +struct dma_mapping_ops swiotlb_dma_ops = { + .mapping_error = swiotlb_dma_mapping_error, + .alloc_coherent = swiotlb_alloc_coherent, .free_coherent = swiotlb_free_coherent, - .map_page = swiotlb_map_page, - .unmap_page = swiotlb_unmap_page, - .map_sg = swiotlb_map_sg_attrs, - .unmap_sg = swiotlb_unmap_sg_attrs, + .map_single = swiotlb_map_single, + .unmap_single = swiotlb_unmap_single, .sync_single_for_cpu = swiotlb_sync_single_for_cpu, .sync_single_for_device = swiotlb_sync_single_for_device, .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, .sync_single_range_for_device = swiotlb_sync_single_range_for_device, .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, .sync_sg_for_device = swiotlb_sync_sg_for_device, - .dma_supported = swiotlb_dma_supported, - .mapping_error = swiotlb_dma_mapping_error, + .map_sg = swiotlb_map_sg, + .unmap_sg = swiotlb_unmap_sg, + .dma_supported_op = swiotlb_dma_supported, }; -void __init swiotlb_dma_init(void) -{ - dma_ops = &swiotlb_dma_ops; - swiotlb_init(); -} - void __init pci_swiotlb_init(void) { if (!iommu_detected) { diff --git a/trunk/arch/ia64/sn/pci/pci_dma.c b/trunk/arch/ia64/sn/pci/pci_dma.c index 8c130e8f00e1..863f5017baae 100644 --- a/trunk/arch/ia64/sn/pci/pci_dma.c +++ b/trunk/arch/ia64/sn/pci/pci_dma.c @@ -10,7 +10,7 @@ */ #include -#include +#include #include #include #include @@ -31,7 +31,7 @@ * this function. 
Of course, SN only supports devices that have 32 or more * address bits when using the PMU. */ -static int sn_dma_supported(struct device *dev, u64 mask) +int sn_dma_supported(struct device *dev, u64 mask) { BUG_ON(dev->bus != &pci_bus_type); @@ -39,6 +39,7 @@ static int sn_dma_supported(struct device *dev, u64 mask) return 0; return 1; } +EXPORT_SYMBOL(sn_dma_supported); /** * sn_dma_set_mask - set the DMA mask @@ -74,8 +75,8 @@ EXPORT_SYMBOL(sn_dma_set_mask); * queue for a SCSI controller). See Documentation/DMA-API.txt for * more information. */ -static void *sn_dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, gfp_t flags) +void *sn_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t * dma_handle, gfp_t flags) { void *cpuaddr; unsigned long phys_addr; @@ -123,6 +124,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size, return cpuaddr; } +EXPORT_SYMBOL(sn_dma_alloc_coherent); /** * sn_pci_free_coherent - free memory associated with coherent DMAable region @@ -134,8 +136,8 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size, * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping * any associated IOMMU mappings. */ -static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_handle) +void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_handle) { struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); @@ -145,6 +147,7 @@ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr provider->dma_unmap(pdev, dma_handle, 0); free_pages((unsigned long)cpu_addr, get_order(size)); } +EXPORT_SYMBOL(sn_dma_free_coherent); /** * sn_dma_map_single_attrs - map a single page for DMA @@ -170,12 +173,10 @@ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr * TODO: simplify our interface; * figure out how to save dmamap handle so can use two step. */ -static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) +dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr, + size_t size, int direction, + struct dma_attrs *attrs) { - void *cpu_addr = page_address(page) + offset; dma_addr_t dma_addr; unsigned long phys_addr; struct pci_dev *pdev = to_pci_dev(dev); @@ -200,6 +201,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, } return dma_addr; } +EXPORT_SYMBOL(sn_dma_map_single_attrs); /** * sn_dma_unmap_single_attrs - unamp a DMA mapped page @@ -213,20 +215,21 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, * by @dma_handle into the coherence domain. On SN, we're always cache * coherent, so we just need to free any ATEs associated with this mapping. 
*/ -static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) +void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr, + size_t size, int direction, + struct dma_attrs *attrs) { struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); BUG_ON(dev->bus != &pci_bus_type); - provider->dma_unmap(pdev, dma_addr, dir); + provider->dma_unmap(pdev, dma_addr, direction); } +EXPORT_SYMBOL(sn_dma_unmap_single_attrs); /** - * sn_dma_unmap_sg - unmap a DMA scatterlist + * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist * @dev: device to unmap * @sg: scatterlist to unmap * @nhwentries: number of scatterlist entries @@ -235,9 +238,9 @@ static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, * * Unmap a set of streaming mode DMA translations. */ -static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, - int nhwentries, enum dma_data_direction dir, - struct dma_attrs *attrs) +void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, + int nhwentries, int direction, + struct dma_attrs *attrs) { int i; struct pci_dev *pdev = to_pci_dev(dev); @@ -247,14 +250,15 @@ static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, BUG_ON(dev->bus != &pci_bus_type); for_each_sg(sgl, sg, nhwentries, i) { - provider->dma_unmap(pdev, sg->dma_address, dir); + provider->dma_unmap(pdev, sg->dma_address, direction); sg->dma_address = (dma_addr_t) NULL; sg->dma_length = 0; } } +EXPORT_SYMBOL(sn_dma_unmap_sg_attrs); /** - * sn_dma_map_sg - map a scatterlist for DMA + * sn_dma_map_sg_attrs - map a scatterlist for DMA * @dev: device to map for * @sg: scatterlist to map * @nhwentries: number of entries @@ -268,9 +272,8 @@ static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, * * Maps each entry of @sg for DMA. */ -static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, - int nhwentries, enum dma_data_direction dir, - struct dma_attrs *attrs) +int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, + int nhwentries, int direction, struct dma_attrs *attrs) { unsigned long phys_addr; struct scatterlist *saved_sg = sgl, *sg; @@ -307,7 +310,8 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, * Free any successfully allocated entries. 
*/ if (i > 0) - sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs); + sn_dma_unmap_sg_attrs(dev, saved_sg, i, + direction, attrs); return 0; } @@ -316,36 +320,41 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, return nhwentries; } +EXPORT_SYMBOL(sn_dma_map_sg_attrs); -static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction dir) +void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, + size_t size, int direction) { BUG_ON(dev->bus != &pci_bus_type); } +EXPORT_SYMBOL(sn_dma_sync_single_for_cpu); -static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, - size_t size, - enum dma_data_direction dir) +void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, + size_t size, int direction) { BUG_ON(dev->bus != &pci_bus_type); } +EXPORT_SYMBOL(sn_dma_sync_single_for_device); -static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) +void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, int direction) { BUG_ON(dev->bus != &pci_bus_type); } +EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu); -static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) +void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, int direction) { BUG_ON(dev->bus != &pci_bus_type); } +EXPORT_SYMBOL(sn_dma_sync_sg_for_device); -static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; } +EXPORT_SYMBOL(sn_dma_mapping_error); u64 sn_dma_get_required_mask(struct device *dev) { @@ -462,23 +471,3 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) out: return ret; } - -static struct dma_map_ops sn_dma_ops = { - .alloc_coherent = sn_dma_alloc_coherent, - .free_coherent = sn_dma_free_coherent, - .map_page = sn_dma_map_page, - .unmap_page = sn_dma_unmap_page, - .map_sg = sn_dma_map_sg, - .unmap_sg = sn_dma_unmap_sg, - .sync_single_for_cpu = sn_dma_sync_single_for_cpu, - .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu, - .sync_single_for_device = sn_dma_sync_single_for_device, - .sync_sg_for_device = sn_dma_sync_sg_for_device, - .mapping_error = sn_dma_mapping_error, - .dma_supported = sn_dma_supported, -}; - -void sn_dma_init(void) -{ - dma_ops = &sn_dma_ops; -} diff --git a/trunk/arch/x86/include/asm/device.h b/trunk/arch/x86/include/asm/device.h index 4994a20acbcb..3c034f48fdb0 100644 --- a/trunk/arch/x86/include/asm/device.h +++ b/trunk/arch/x86/include/asm/device.h @@ -6,7 +6,7 @@ struct dev_archdata { void *acpi_handle; #endif #ifdef CONFIG_X86_64 -struct dma_map_ops *dma_ops; +struct dma_mapping_ops *dma_ops; #endif #ifdef CONFIG_DMAR void *iommu; /* hook for IOMMU specific extension */ diff --git a/trunk/arch/x86/include/asm/dma-mapping.h b/trunk/arch/x86/include/asm/dma-mapping.h index 9c78bd40ebec..132a134d12f2 100644 --- a/trunk/arch/x86/include/asm/dma-mapping.h +++ b/trunk/arch/x86/include/asm/dma-mapping.h @@ -7,7 +7,6 @@ */ #include -#include #include #include #include @@ -17,9 +16,47 @@ extern int iommu_merge; extern struct device x86_dma_fallback_dev; extern int panic_on_overflow; -extern struct dma_map_ops *dma_ops; - -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +struct dma_mapping_ops { + int (*mapping_error)(struct device *dev, + dma_addr_t dma_addr); 
+ void* (*alloc_coherent)(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp); + void (*free_coherent)(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); + dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr, + size_t size, int direction); + void (*unmap_single)(struct device *dev, dma_addr_t addr, + size_t size, int direction); + void (*sync_single_for_cpu)(struct device *hwdev, + dma_addr_t dma_handle, size_t size, + int direction); + void (*sync_single_for_device)(struct device *hwdev, + dma_addr_t dma_handle, size_t size, + int direction); + void (*sync_single_range_for_cpu)(struct device *hwdev, + dma_addr_t dma_handle, unsigned long offset, + size_t size, int direction); + void (*sync_single_range_for_device)(struct device *hwdev, + dma_addr_t dma_handle, unsigned long offset, + size_t size, int direction); + void (*sync_sg_for_cpu)(struct device *hwdev, + struct scatterlist *sg, int nelems, + int direction); + void (*sync_sg_for_device)(struct device *hwdev, + struct scatterlist *sg, int nelems, + int direction); + int (*map_sg)(struct device *hwdev, struct scatterlist *sg, + int nents, int direction); + void (*unmap_sg)(struct device *hwdev, + struct scatterlist *sg, int nents, + int direction); + int (*dma_supported)(struct device *hwdev, u64 mask); + int is_phys; +}; + +extern struct dma_mapping_ops *dma_ops; + +static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) { #ifdef CONFIG_X86_32 return dma_ops; @@ -34,7 +71,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) /* Make sure we keep the same behaviour */ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - struct dma_map_ops *ops = get_dma_ops(dev); + struct dma_mapping_ops *ops = get_dma_ops(dev); if (ops->mapping_error) return ops->mapping_error(dev, dma_addr); @@ -53,139 +90,137 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size, - enum dma_data_direction dir) + int direction) { - struct dma_map_ops *ops = get_dma_ops(hwdev); + struct dma_mapping_ops *ops = get_dma_ops(hwdev); - BUG_ON(!valid_dma_direction(dir)); - return ops->map_page(hwdev, virt_to_page(ptr), - (unsigned long)ptr & ~PAGE_MASK, size, - dir, NULL); + BUG_ON(!valid_dma_direction(direction)); + return ops->map_single(hwdev, virt_to_phys(ptr), size, direction); } static inline void dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, - enum dma_data_direction dir) + int direction) { - struct dma_map_ops *ops = get_dma_ops(dev); + struct dma_mapping_ops *ops = get_dma_ops(dev); - BUG_ON(!valid_dma_direction(dir)); - if (ops->unmap_page) - ops->unmap_page(dev, addr, size, dir, NULL); + BUG_ON(!valid_dma_direction(direction)); + if (ops->unmap_single) + ops->unmap_single(dev, addr, size, direction); } static inline int dma_map_sg(struct device *hwdev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) + int nents, int direction) { - struct dma_map_ops *ops = get_dma_ops(hwdev); + struct dma_mapping_ops *ops = get_dma_ops(hwdev); - BUG_ON(!valid_dma_direction(dir)); - return ops->map_sg(hwdev, sg, nents, dir, NULL); + BUG_ON(!valid_dma_direction(direction)); + return ops->map_sg(hwdev, sg, nents, direction); } static inline void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, - enum dma_data_direction dir) + int direction) { - struct dma_map_ops *ops = get_dma_ops(hwdev); + struct 
dma_mapping_ops *ops = get_dma_ops(hwdev); - BUG_ON(!valid_dma_direction(dir)); + BUG_ON(!valid_dma_direction(direction)); if (ops->unmap_sg) - ops->unmap_sg(hwdev, sg, nents, dir, NULL); + ops->unmap_sg(hwdev, sg, nents, direction); } static inline void dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction dir) + size_t size, int direction) { - struct dma_map_ops *ops = get_dma_ops(hwdev); + struct dma_mapping_ops *ops = get_dma_ops(hwdev); - BUG_ON(!valid_dma_direction(dir)); + BUG_ON(!valid_dma_direction(direction)); if (ops->sync_single_for_cpu) - ops->sync_single_for_cpu(hwdev, dma_handle, size, dir); + ops->sync_single_for_cpu(hwdev, dma_handle, size, direction); flush_write_buffers(); } static inline void dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction dir) + size_t size, int direction) { - struct dma_map_ops *ops = get_dma_ops(hwdev); + struct dma_mapping_ops *ops = get_dma_ops(hwdev); - BUG_ON(!valid_dma_direction(dir)); + BUG_ON(!valid_dma_direction(direction)); if (ops->sync_single_for_device) - ops->sync_single_for_device(hwdev, dma_handle, size, dir); + ops->sync_single_for_device(hwdev, dma_handle, size, direction); flush_write_buffers(); } static inline void dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction dir) + unsigned long offset, size_t size, int direction) { - struct dma_map_ops *ops = get_dma_ops(hwdev); + struct dma_mapping_ops *ops = get_dma_ops(hwdev); - BUG_ON(!valid_dma_direction(dir)); + BUG_ON(!valid_dma_direction(direction)); if (ops->sync_single_range_for_cpu) ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, - size, dir); + size, direction); flush_write_buffers(); } static inline void dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, size_t size, - enum dma_data_direction dir) + int direction) { - struct dma_map_ops *ops = get_dma_ops(hwdev); + struct dma_mapping_ops *ops = get_dma_ops(hwdev); - BUG_ON(!valid_dma_direction(dir)); + BUG_ON(!valid_dma_direction(direction)); if (ops->sync_single_range_for_device) ops->sync_single_range_for_device(hwdev, dma_handle, - offset, size, dir); + offset, size, direction); flush_write_buffers(); } static inline void dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) + int nelems, int direction) { - struct dma_map_ops *ops = get_dma_ops(hwdev); + struct dma_mapping_ops *ops = get_dma_ops(hwdev); - BUG_ON(!valid_dma_direction(dir)); + BUG_ON(!valid_dma_direction(direction)); if (ops->sync_sg_for_cpu) - ops->sync_sg_for_cpu(hwdev, sg, nelems, dir); + ops->sync_sg_for_cpu(hwdev, sg, nelems, direction); flush_write_buffers(); } static inline void dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) + int nelems, int direction) { - struct dma_map_ops *ops = get_dma_ops(hwdev); + struct dma_mapping_ops *ops = get_dma_ops(hwdev); - BUG_ON(!valid_dma_direction(dir)); + BUG_ON(!valid_dma_direction(direction)); if (ops->sync_sg_for_device) - ops->sync_sg_for_device(hwdev, sg, nelems, dir); + ops->sync_sg_for_device(hwdev, sg, nelems, direction); flush_write_buffers(); } static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, - enum dma_data_direction dir) + int direction) { - struct dma_map_ops *ops = 
get_dma_ops(dev); + struct dma_mapping_ops *ops = get_dma_ops(dev); - BUG_ON(!valid_dma_direction(dir)); - return ops->map_page(dev, page, offset, size, dir, NULL); + BUG_ON(!valid_dma_direction(direction)); + return ops->map_single(dev, page_to_phys(page) + offset, + size, direction); } static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) + size_t size, int direction) { - dma_unmap_single(dev, addr, size, dir); + dma_unmap_single(dev, addr, size, direction); } static inline void @@ -231,7 +266,7 @@ static inline void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { - struct dma_map_ops *ops = get_dma_ops(dev); + struct dma_mapping_ops *ops = get_dma_ops(dev); void *memory; gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); @@ -257,7 +292,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, static inline void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t bus) { - struct dma_map_ops *ops = get_dma_ops(dev); + struct dma_mapping_ops *ops = get_dma_ops(dev); WARN_ON(irqs_disabled()); /* for portability */ diff --git a/trunk/arch/x86/include/asm/iommu.h b/trunk/arch/x86/include/asm/iommu.h index af326a2975b5..a6ee9e6f530f 100644 --- a/trunk/arch/x86/include/asm/iommu.h +++ b/trunk/arch/x86/include/asm/iommu.h @@ -3,7 +3,7 @@ extern void pci_iommu_shutdown(void); extern void no_iommu_init(void); -extern struct dma_map_ops nommu_dma_ops; +extern struct dma_mapping_ops nommu_dma_ops; extern int force_iommu, no_iommu; extern int iommu_detected; diff --git a/trunk/arch/x86/kernel/Makefile b/trunk/arch/x86/kernel/Makefile index bb1eef62d8f0..d364df03c1d6 100644 --- a/trunk/arch/x86/kernel/Makefile +++ b/trunk/arch/x86/kernel/Makefile @@ -109,7 +109,7 @@ obj-$(CONFIG_MICROCODE) += microcode.o obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o -obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o +obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o # NB rename without _64 ### # 64 bit specific files diff --git a/trunk/arch/x86/kernel/amd_iommu.c b/trunk/arch/x86/kernel/amd_iommu.c index 008e522b9536..d7a7c4c063ff 100644 --- a/trunk/arch/x86/kernel/amd_iommu.c +++ b/trunk/arch/x86/kernel/amd_iommu.c @@ -22,11 +22,8 @@ #include #include #include -#include #include -#ifdef CONFIG_IOMMU_API #include -#endif #include #include #include @@ -1298,10 +1295,8 @@ static void __unmap_single(struct amd_iommu *iommu, /* * The exported map_single function for dma_ops. */ -static dma_addr_t map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) +static dma_addr_t map_single(struct device *dev, phys_addr_t paddr, + size_t size, int dir) { unsigned long flags; struct amd_iommu *iommu; @@ -1309,7 +1304,6 @@ static dma_addr_t map_page(struct device *dev, struct page *page, u16 devid; dma_addr_t addr; u64 dma_mask; - phys_addr_t paddr = page_to_phys(page) + offset; INC_STATS_COUNTER(cnt_map_single); @@ -1344,8 +1338,8 @@ static dma_addr_t map_page(struct device *dev, struct page *page, /* * The exported unmap_single function for dma_ops. 
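 *
 * A hedged aside, not in the original patch: callers reach this through
 * the generic dma_unmap_single() wrapper shown earlier, so a typical
 * round trip looks roughly like
 *
 *	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -EIO;
 *	...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 *
 * with map_single/unmap_single in amd_iommu_dma_ops doing the real work.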
*/ -static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) +static void unmap_single(struct device *dev, dma_addr_t dma_addr, + size_t size, int dir) { unsigned long flags; struct amd_iommu *iommu; @@ -1394,8 +1388,7 @@ static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist, * lists). */ static int map_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + int nelems, int dir) { unsigned long flags; struct amd_iommu *iommu; @@ -1462,8 +1455,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, * lists). */ static void unmap_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + int nelems, int dir) { unsigned long flags; struct amd_iommu *iommu; @@ -1650,11 +1642,11 @@ static void prealloc_protection_domains(void) } } -static struct dma_map_ops amd_iommu_dma_ops = { +static struct dma_mapping_ops amd_iommu_dma_ops = { .alloc_coherent = alloc_coherent, .free_coherent = free_coherent, - .map_page = map_page, - .unmap_page = unmap_page, + .map_single = map_single, + .unmap_single = unmap_single, .map_sg = map_sg, .unmap_sg = unmap_sg, .dma_supported = amd_iommu_dma_supported, diff --git a/trunk/arch/x86/kernel/pci-calgary_64.c b/trunk/arch/x86/kernel/pci-calgary_64.c index 755c21e906f3..d28bbdc35e4e 100644 --- a/trunk/arch/x86/kernel/pci-calgary_64.c +++ b/trunk/arch/x86/kernel/pci-calgary_64.c @@ -380,9 +380,8 @@ static inline struct iommu_table *find_iommu_table(struct device *dev) return tbl; } -static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist, - int nelems,enum dma_data_direction dir, - struct dma_attrs *attrs) +static void calgary_unmap_sg(struct device *dev, + struct scatterlist *sglist, int nelems, int direction) { struct iommu_table *tbl = find_iommu_table(dev); struct scatterlist *s; @@ -405,8 +404,7 @@ static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist, } static int calgary_map_sg(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + int nelems, int direction) { struct iommu_table *tbl = find_iommu_table(dev); struct scatterlist *s; @@ -431,14 +429,15 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, s->dma_address = (entry << PAGE_SHIFT) | s->offset; /* insert into HW table */ - tce_build(tbl, entry, npages, vaddr & PAGE_MASK, dir); + tce_build(tbl, entry, npages, vaddr & PAGE_MASK, + direction); s->dma_length = s->length; } return nelems; error: - calgary_unmap_sg(dev, sg, nelems, dir, NULL); + calgary_unmap_sg(dev, sg, nelems, direction); for_each_sg(sg, s, nelems, i) { sg->dma_address = bad_dma_address; sg->dma_length = 0; @@ -446,12 +445,10 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, return 0; } -static dma_addr_t calgary_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) +static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr, + size_t size, int direction) { - void *vaddr = page_address(page) + offset; + void *vaddr = phys_to_virt(paddr); unsigned long uaddr; unsigned int npages; struct iommu_table *tbl = find_iommu_table(dev); @@ -459,18 +456,17 @@ static dma_addr_t calgary_map_page(struct device *dev, struct page *page, uaddr = (unsigned long)vaddr; npages 
= iommu_num_pages(uaddr, size, PAGE_SIZE); - return iommu_alloc(dev, tbl, vaddr, npages, dir); + return iommu_alloc(dev, tbl, vaddr, npages, direction); } -static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) +static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle, + size_t size, int direction) { struct iommu_table *tbl = find_iommu_table(dev); unsigned int npages; - npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); - iommu_free(tbl, dma_addr, npages); + npages = iommu_num_pages(dma_handle, size, PAGE_SIZE); + iommu_free(tbl, dma_handle, npages); } static void* calgary_alloc_coherent(struct device *dev, size_t size, @@ -519,13 +515,13 @@ static void calgary_free_coherent(struct device *dev, size_t size, free_pages((unsigned long)vaddr, get_order(size)); } -static struct dma_map_ops calgary_dma_ops = { +static struct dma_mapping_ops calgary_dma_ops = { .alloc_coherent = calgary_alloc_coherent, .free_coherent = calgary_free_coherent, + .map_single = calgary_map_single, + .unmap_single = calgary_unmap_single, .map_sg = calgary_map_sg, .unmap_sg = calgary_unmap_sg, - .map_page = calgary_map_page, - .unmap_page = calgary_unmap_page, }; static inline void __iomem * busno_to_bbar(unsigned char num) diff --git a/trunk/arch/x86/kernel/pci-dma.c b/trunk/arch/x86/kernel/pci-dma.c index f293a8df6828..b25428533141 100644 --- a/trunk/arch/x86/kernel/pci-dma.c +++ b/trunk/arch/x86/kernel/pci-dma.c @@ -12,7 +12,7 @@ static int forbid_dac __read_mostly; -struct dma_map_ops *dma_ops; +struct dma_mapping_ops *dma_ops; EXPORT_SYMBOL(dma_ops); static int iommu_sac_force __read_mostly; @@ -224,7 +224,7 @@ early_param("iommu", iommu_setup); int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + struct dma_mapping_ops *ops = get_dma_ops(dev); #ifdef CONFIG_PCI if (mask > 0xffffffff && forbid_dac > 0) { diff --git a/trunk/arch/x86/kernel/pci-gart_64.c b/trunk/arch/x86/kernel/pci-gart_64.c index b284b58c035c..d5768b1af080 100644 --- a/trunk/arch/x86/kernel/pci-gart_64.c +++ b/trunk/arch/x86/kernel/pci-gart_64.c @@ -255,13 +255,10 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, } /* Map a single area into the IOMMU */ -static dma_addr_t gart_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) +static dma_addr_t +gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir) { unsigned long bus; - phys_addr_t paddr = page_to_phys(page) + offset; if (!dev) dev = &x86_dma_fallback_dev; @@ -278,9 +275,8 @@ static dma_addr_t gart_map_page(struct device *dev, struct page *page, /* * Free a DMA mapping. */ -static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) +static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr, + size_t size, int direction) { unsigned long iommu_page; int npages; @@ -302,8 +298,8 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, /* * Wrapper for pci_unmap_single working with scatterlists. 
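 *
 * Sketch of the caller side, assuming the usual scatterlist setup (an
 * illustration, not taken from this patch):
 *
 *	nents = dma_map_sg(dev, sglist, n, DMA_FROM_DEVICE);
 *	if (!nents)
 *		goto fail;	(the map_sg path returns 0 on error)
 *	...
 *	dma_unmap_sg(dev, sglist, n, DMA_FROM_DEVICE);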
*/ -static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, struct dma_attrs *attrs) +static void +gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) { struct scatterlist *s; int i; @@ -311,7 +307,7 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, for_each_sg(sg, s, nents, i) { if (!s->dma_length || !s->length) break; - gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL); + gart_unmap_single(dev, s->dma_address, s->dma_length, dir); } } @@ -333,7 +329,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, addr = dma_map_area(dev, addr, s->length, dir, 0); if (addr == bad_dma_address) { if (i > 0) - gart_unmap_sg(dev, sg, i, dir, NULL); + gart_unmap_sg(dev, sg, i, dir); nents = 0; sg[0].dma_length = 0; break; @@ -404,8 +400,8 @@ dma_map_cont(struct device *dev, struct scatterlist *start, int nelems, * DMA map all entries in a scatterlist. * Merge chunks that have page aligned sizes into a continuous mapping. */ -static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, struct dma_attrs *attrs) +static int +gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) { struct scatterlist *s, *ps, *start_sg, *sgmap; int need = 0, nextneed, i, out, start; @@ -472,7 +468,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, error: flush_gart(); - gart_unmap_sg(dev, sg, out, dir, NULL); + gart_unmap_sg(dev, sg, out, dir); /* When it was forced or merged try again in a dumb way */ if (force_iommu || iommu_merge) { @@ -525,7 +521,7 @@ static void gart_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr) { - gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL); + gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL); free_pages((unsigned long)vaddr, get_order(size)); } @@ -711,11 +707,11 @@ static __init int init_k8_gatt(struct agp_kern_info *info) return -1; } -static struct dma_map_ops gart_dma_ops = { +static struct dma_mapping_ops gart_dma_ops = { + .map_single = gart_map_single, + .unmap_single = gart_unmap_single, .map_sg = gart_map_sg, .unmap_sg = gart_unmap_sg, - .map_page = gart_map_page, - .unmap_page = gart_unmap_page, .alloc_coherent = gart_alloc_coherent, .free_coherent = gart_free_coherent, }; diff --git a/trunk/arch/x86/kernel/pci-nommu.c b/trunk/arch/x86/kernel/pci-nommu.c index fe50214db876..c70ab5a5d4c8 100644 --- a/trunk/arch/x86/kernel/pci-nommu.c +++ b/trunk/arch/x86/kernel/pci-nommu.c @@ -25,19 +25,19 @@ check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) return 1; } -static dma_addr_t nommu_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) +static dma_addr_t +nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, + int direction) { - dma_addr_t bus = page_to_phys(page) + offset; + dma_addr_t bus = paddr; WARN_ON(size == 0); - if (!check_addr("map_single", dev, bus, size)) - return bad_dma_address; + if (!check_addr("map_single", hwdev, bus, size)) + return bad_dma_address; flush_write_buffers(); return bus; } + /* Map a set of buffers described by scatterlist in streaming * mode for DMA. This is the scatter-gather version of the * above pci_map_single interface. 
Here the scatter gather list @@ -54,8 +54,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page, * the same here. */ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + int nents, int direction) { struct scatterlist *s; int i; @@ -79,11 +78,11 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr, free_pages((unsigned long)vaddr, get_order(size)); } -struct dma_map_ops nommu_dma_ops = { +struct dma_mapping_ops nommu_dma_ops = { .alloc_coherent = dma_generic_alloc_coherent, .free_coherent = nommu_free_coherent, + .map_single = nommu_map_single, .map_sg = nommu_map_sg, - .map_page = nommu_map_page, .is_phys = 1, }; diff --git a/trunk/arch/x86/kernel/pci-swiotlb.c b/trunk/arch/x86/kernel/pci-swiotlb_64.c similarity index 81% rename from trunk/arch/x86/kernel/pci-swiotlb.c rename to trunk/arch/x86/kernel/pci-swiotlb_64.c index 34f12e9996ed..d59c91747665 100644 --- a/trunk/arch/x86/kernel/pci-swiotlb.c +++ b/trunk/arch/x86/kernel/pci-swiotlb_64.c @@ -33,11 +33,18 @@ phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr) return baddr; } -int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size) +int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size) { return 0; } +static dma_addr_t +swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size, + int direction) +{ + return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction); +} + static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags) { @@ -50,20 +57,20 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); } -struct dma_map_ops swiotlb_dma_ops = { +struct dma_mapping_ops swiotlb_dma_ops = { .mapping_error = swiotlb_dma_mapping_error, .alloc_coherent = x86_swiotlb_alloc_coherent, .free_coherent = swiotlb_free_coherent, + .map_single = swiotlb_map_single_phys, + .unmap_single = swiotlb_unmap_single, .sync_single_for_cpu = swiotlb_sync_single_for_cpu, .sync_single_for_device = swiotlb_sync_single_for_device, .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, .sync_single_range_for_device = swiotlb_sync_single_range_for_device, .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, .sync_sg_for_device = swiotlb_sync_sg_for_device, - .map_sg = swiotlb_map_sg_attrs, - .unmap_sg = swiotlb_unmap_sg_attrs, - .map_page = swiotlb_map_page, - .unmap_page = swiotlb_unmap_page, + .map_sg = swiotlb_map_sg, + .unmap_sg = swiotlb_unmap_sg, .dma_supported = NULL, }; diff --git a/trunk/drivers/pci/intel-iommu.c b/trunk/drivers/pci/intel-iommu.c index e7d058aa7b0d..f3f686581a90 100644 --- a/trunk/drivers/pci/intel-iommu.c +++ b/trunk/drivers/pci/intel-iommu.c @@ -2284,13 +2284,11 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, return 0; } -static dma_addr_t intel_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) +dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr, + size_t size, int dir) { - return __intel_map_single(dev, page_to_phys(page) + offset, size, - dir, to_pci_dev(dev)->dma_mask); + return __intel_map_single(hwdev, paddr, size, dir, + to_pci_dev(hwdev)->dma_mask); } static void flush_unmaps(void) @@ -2354,9 +2352,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) 
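/*
 * Illustrative note, not part of the patch: with intel_map_single()
 * above exported (see the prototypes added to linux/intel-iommu.h later
 * in this diff), an outside shim can forward to it directly, e.g.
 *
 *	dma_addr_t my_map(struct device *dev, void *addr, size_t sz, int dir)
 *	{
 *		return intel_map_single(dev, virt_to_phys(addr), sz, dir);
 *	}
 *
 * my_map is a hypothetical wrapper name used only for illustration.
 */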
spin_unlock_irqrestore(&async_umap_flush_lock, flags); } -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) +void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, + int dir) { struct pci_dev *pdev = to_pci_dev(dev); struct dmar_domain *domain; @@ -2400,14 +2397,8 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, } } -static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, - int dir) -{ - intel_unmap_page(dev, dev_addr, size, dir, NULL); -} - -static void *intel_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags) +void *intel_alloc_coherent(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags) { void *vaddr; int order; @@ -2430,8 +2421,8 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size, return NULL; } -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, - dma_addr_t dma_handle) +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, + dma_addr_t dma_handle) { int order; @@ -2444,9 +2435,8 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, + int nelems, int dir) { int i; struct pci_dev *pdev = to_pci_dev(hwdev); @@ -2503,8 +2493,8 @@ static int intel_nontranslate_map_sg(struct device *hddev, return nelems; } -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, - enum dma_data_direction dir, struct dma_attrs *attrs) +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, + int dir) { void *addr; int i; @@ -2584,19 +2574,13 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne return nelems; } -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return !dma_addr; -} - -struct dma_map_ops intel_dma_ops = { +static struct dma_mapping_ops intel_dma_ops = { .alloc_coherent = intel_alloc_coherent, .free_coherent = intel_free_coherent, + .map_single = intel_map_single, + .unmap_single = intel_unmap_single, .map_sg = intel_map_sg, .unmap_sg = intel_unmap_sg, - .map_page = intel_map_page, - .unmap_page = intel_unmap_page, - .mapping_error = intel_mapping_error, }; static inline int iommu_domain_cache_init(void) diff --git a/trunk/include/linux/dma-mapping.h b/trunk/include/linux/dma-mapping.h index d7d090d21031..ba9114ec5d3a 100644 --- a/trunk/include/linux/dma-mapping.h +++ b/trunk/include/linux/dma-mapping.h @@ -3,8 +3,6 @@ #include #include -#include -#include /* These definitions mirror those in pci.h, so they can be used * interchangeably with their PCI_ counterparts */ @@ -15,52 +13,6 @@ enum dma_data_direction { DMA_NONE = 3, }; -struct dma_map_ops { - void* (*alloc_coherent)(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp); - void (*free_coherent)(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle); - dma_addr_t (*map_page)(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs); - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction dir, - 
struct dma_attrs *attrs); - int (*map_sg)(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - struct dma_attrs *attrs); - void (*unmap_sg)(struct device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction dir, - struct dma_attrs *attrs); - void (*sync_single_for_cpu)(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction dir); - void (*sync_single_for_device)(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction dir); - void (*sync_single_range_for_cpu)(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction dir); - void (*sync_single_range_for_device)(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction dir); - void (*sync_sg_for_cpu)(struct device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction dir); - void (*sync_sg_for_device)(struct device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction dir); - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); - int (*dma_supported)(struct device *dev, u64 mask); - int is_phys; -}; - #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) /* diff --git a/trunk/include/linux/intel-iommu.h b/trunk/include/linux/intel-iommu.h index 4d6a0a2c00b0..d2e3cbfba14f 100644 --- a/trunk/include/linux/intel-iommu.h +++ b/trunk/include/linux/intel-iommu.h @@ -331,4 +331,11 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); +extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); +extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t); +extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int); +extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int); +extern int intel_map_sg(struct device *, struct scatterlist *, int, int); +extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int); + #endif diff --git a/trunk/include/linux/swiotlb.h b/trunk/include/linux/swiotlb.h index ac9ff54f7cb3..dedd3c0cfe30 100644 --- a/trunk/include/linux/swiotlb.h +++ b/trunk/include/linux/swiotlb.h @@ -31,7 +31,7 @@ extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t address); extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); -extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size); +extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size); extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, @@ -41,13 +41,20 @@ extern void swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); -extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs); -extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs); +extern dma_addr_t +swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir); + +extern void +swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, + size_t size, int dir); + +extern dma_addr_t +swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, + int dir, struct dma_attrs *attrs); + +extern void +swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t 
dev_addr, + size_t size, int dir, struct dma_attrs *attrs); extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, @@ -59,38 +66,36 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, extern int swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, - enum dma_data_direction dir, struct dma_attrs *attrs); + int dir, struct dma_attrs *attrs); extern void swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, - int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs); + int nelems, int dir, struct dma_attrs *attrs); extern void swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir); + size_t size, int dir); extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir); + int nelems, int dir); extern void swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir); + size_t size, int dir); extern void swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir); + int nelems, int dir); extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, - enum dma_data_direction dir); + unsigned long offset, size_t size, int dir); extern void swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, unsigned long offset, size_t size, - enum dma_data_direction dir); + int dir); extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); diff --git a/trunk/lib/swiotlb.c b/trunk/lib/swiotlb.c index 32e2bd3b1142..1f991acc2a05 100644 --- a/trunk/lib/swiotlb.c +++ b/trunk/lib/swiotlb.c @@ -145,7 +145,7 @@ static void *swiotlb_bus_to_virt(dma_addr_t address) return phys_to_virt(swiotlb_bus_to_phys(address)); } -int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size) +int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size) { return 0; } @@ -315,9 +315,9 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size) return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); } -static inline int range_needs_mapping(phys_addr_t paddr, size_t size) +static inline int range_needs_mapping(void *ptr, size_t size) { - return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size); + return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size); } static int is_swiotlb_buffer(char *addr) @@ -636,14 +636,11 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) * Once the device is given the dma address, the device owns this memory until * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. */ -dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - phys_addr_t phys = page_to_phys(page) + offset; - void *ptr = page_address(page) + offset; - dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys); +dma_addr_t +swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, + int dir, struct dma_attrs *attrs) +{ + dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr); void *map; BUG_ON(dir == DMA_NONE); @@ -652,30 +649,37 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, * we can safely return the device addr and not worry about bounce * buffering it. 
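 *
 * (A hedged gloss, not in the original: per address_needs_mapping()
 *  earlier in this file, "safe" here means the bus address fits the
 *  device's DMA mask, roughly
 *
 *	is_buffer_dma_capable(dma_get_mask(hwdev), dev_addr, size)
 *
 *  i.e. dev_addr + size stays within dma_get_mask(hwdev).)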
*/ - if (!address_needs_mapping(dev, dev_addr, size) && - !range_needs_mapping(virt_to_phys(ptr), size)) + if (!address_needs_mapping(hwdev, dev_addr, size) && + !range_needs_mapping(ptr, size)) return dev_addr; /* * Oh well, have to allocate and map a bounce buffer. */ - map = map_single(dev, phys, size, dir); + map = map_single(hwdev, virt_to_phys(ptr), size, dir); if (!map) { - swiotlb_full(dev, size, dir, 1); + swiotlb_full(hwdev, size, dir, 1); map = io_tlb_overflow_buffer; } - dev_addr = swiotlb_virt_to_bus(dev, map); + dev_addr = swiotlb_virt_to_bus(hwdev, map); /* * Ensure that the address returned is DMA'ble */ - if (address_needs_mapping(dev, dev_addr, size)) + if (address_needs_mapping(hwdev, dev_addr, size)) panic("map_single: bounce buffer is not DMA'ble"); return dev_addr; } -EXPORT_SYMBOL_GPL(swiotlb_map_page); +EXPORT_SYMBOL(swiotlb_map_single_attrs); + +dma_addr_t +swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) +{ + return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL); +} +EXPORT_SYMBOL(swiotlb_map_single); /* * Unmap a single streaming mode DMA translation. The dma_addr and size must @@ -685,9 +689,9 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page); * After this call, reads by the cpu to the buffer are guaranteed to see * whatever the device wrote there. */ -void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) +void +swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, + size_t size, int dir, struct dma_attrs *attrs) { char *dma_addr = swiotlb_bus_to_virt(dev_addr); @@ -697,7 +701,15 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, else if (dir == DMA_FROM_DEVICE) dma_mark_clean(dma_addr, size); } -EXPORT_SYMBOL_GPL(swiotlb_unmap_page); +EXPORT_SYMBOL(swiotlb_unmap_single_attrs); + +void +swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, + int dir) +{ + return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL); +} +EXPORT_SYMBOL(swiotlb_unmap_single); /* * Make physical memory consistent for a single streaming mode DMA translation @@ -724,7 +736,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, void swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir) + size_t size, int dir) { swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); } @@ -732,7 +744,7 @@ EXPORT_SYMBOL(swiotlb_sync_single_for_cpu); void swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir) + size_t size, int dir) { swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); } @@ -757,8 +769,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, void swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, - enum dma_data_direction dir) + unsigned long offset, size_t size, int dir) { swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, SYNC_FOR_CPU); @@ -767,8 +778,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu); void swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, - enum dma_data_direction dir) + unsigned long offset, size_t size, int dir) { swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, SYNC_FOR_DEVICE); @@ -793,7 +803,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); */ int 
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, - enum dma_data_direction dir, struct dma_attrs *attrs) + int dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; @@ -801,10 +811,10 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { - phys_addr_t paddr = sg_phys(sg); - dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr); + void *addr = sg_virt(sg); + dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, addr); - if (range_needs_mapping(paddr, sg->length) || + if (range_needs_mapping(addr, sg->length) || address_needs_mapping(hwdev, dev_addr, sg->length)) { void *map = map_single(hwdev, sg_phys(sg), sg->length, dir); @@ -840,7 +850,7 @@ EXPORT_SYMBOL(swiotlb_map_sg); */ void swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, - int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) + int nelems, int dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; @@ -848,11 +858,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { - if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg))) + if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg))) unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), sg->dma_length, dir); else if (dir == DMA_FROM_DEVICE) - dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length); + dma_mark_clean(sg_virt(sg), sg->dma_length); } } EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); @@ -882,17 +892,17 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { - if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg))) + if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg))) sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), sg->dma_length, dir, target); else if (dir == DMA_FROM_DEVICE) - dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length); + dma_mark_clean(sg_virt(sg), sg->dma_length); } } void swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) + int nelems, int dir) { swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); } @@ -900,7 +910,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); void swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) + int nelems, int dir) { swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); }
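/*
 * Closing illustration, not part of the patch: the restored non-attrs
 * swiotlb entry points are thin wrappers over the _attrs variants above,
 * so a caller that needs no attributes simply does
 *
 *	dma_addr_t h = swiotlb_map_single(hwdev, buf, len, DMA_TO_DEVICE);
 *	if (swiotlb_dma_mapping_error(hwdev, h))
 *		...fall back; h then points at io_tlb_overflow_buffer...
 *	swiotlb_unmap_single(hwdev, h, len, DMA_TO_DEVICE);
 *
 * swiotlb_dma_mapping_error() is declared in the swiotlb.h hunk above.
 */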