diff --git a/[refs] b/[refs]
index 6a69c0ebde2e..73a0db15f450 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: abebfb18ea4fe69e9a04187a77fed4e11d67dba4
+refs/heads/master: bdd43cb39f1b1e897c5d7992d05d0b9f0dd786d1
diff --git a/trunk/Documentation/devicetree/bindings/ata/ahci-platform.txt b/trunk/Documentation/devicetree/bindings/ata/ahci-platform.txt
index 6c1ad01d27e8..8bb8a76d42e8 100644
--- a/trunk/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/trunk/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -8,9 +8,6 @@ Required properties:
 - interrupts        :
 - reg               :
 
-Optional properties:
-- dma-coherent      : Present if dma operations are coherent
-
 Example:
         sata@ffe08000 {
                 compatible = "calxeda,hb-ahci";
diff --git a/trunk/Documentation/devicetree/bindings/dma/arm-pl330.txt b/trunk/Documentation/devicetree/bindings/dma/arm-pl330.txt
index 36e27d54260b..a4cd273b2a67 100644
--- a/trunk/Documentation/devicetree/bindings/dma/arm-pl330.txt
+++ b/trunk/Documentation/devicetree/bindings/dma/arm-pl330.txt
@@ -9,9 +9,6 @@ Required properties:
   region.
 - interrupts: interrupt number to the cpu.
 
-Optional properties:
-- dma-coherent : Present if dma operations are coherent
-
 Example:
 
         pdma0: pdma@12680000 {
diff --git a/trunk/Documentation/devicetree/bindings/net/calxeda-xgmac.txt b/trunk/Documentation/devicetree/bindings/net/calxeda-xgmac.txt
index c8ae996bd8f2..411727a3f82d 100644
--- a/trunk/Documentation/devicetree/bindings/net/calxeda-xgmac.txt
+++ b/trunk/Documentation/devicetree/bindings/net/calxeda-xgmac.txt
@@ -6,9 +6,6 @@ Required properties:
 - interrupts : Should contain 3 xgmac interrupts. The 1st is main interrupt.
   The 2nd is pwr mgt interrupt. The 3rd is low power state interrupt.
 
-Optional properties:
-- dma-coherent      : Present if dma operations are coherent
-
 Example:
 
 ethernet@fff50000 {
diff --git a/trunk/arch/arm/boot/dts/highbank.dts b/trunk/arch/arm/boot/dts/highbank.dts
index 7414577c177f..9fecf1ae777b 100644
--- a/trunk/arch/arm/boot/dts/highbank.dts
+++ b/trunk/arch/arm/boot/dts/highbank.dts
@@ -121,7 +121,6 @@
 			compatible = "calxeda,hb-ahci";
 			reg = <0xffe08000 0x10000>;
 			interrupts = <0 83 4>;
-			dma-coherent;
 		};
 
 		sdhci@ffe0e000 {
diff --git a/trunk/arch/arm/include/asm/barrier.h b/trunk/arch/arm/include/asm/barrier.h
index 8dcd9c702d90..05112380dc53 100644
--- a/trunk/arch/arm/include/asm/barrier.h
+++ b/trunk/arch/arm/include/asm/barrier.h
@@ -44,9 +44,10 @@
 #define rmb()		dsb()
 #define wmb()		mb()
 #else
-#define mb()		barrier()
-#define rmb()		barrier()
-#define wmb()		barrier()
+#include
+#define mb()		do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define rmb()		do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define wmb()		do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #endif
 
 #ifndef CONFIG_SMP
diff --git a/trunk/arch/arm/include/asm/dma-mapping.h b/trunk/arch/arm/include/asm/dma-mapping.h
index 23004847bb05..5c44dcb0987b 100644
--- a/trunk/arch/arm/include/asm/dma-mapping.h
+++ b/trunk/arch/arm/include/asm/dma-mapping.h
@@ -13,7 +13,6 @@
 #define DMA_ERROR_CODE	(~0)
 
 extern struct dma_map_ops arm_dma_ops;
-extern struct dma_map_ops arm_coherent_dma_ops;
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
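A note on the barrier.h hunk above: the restored definitions make mb()/rmb()/wmb() emit a real dmb() on a coherent-capable build instead of degrading to a bare compiler barrier(). Coherency removes the need for cache maintenance, not the need for memory ordering. The following sketch shows the kind of driver sequence that depends on this; every identifier in it is invented for illustration and is not part of the patch.

#include <linux/types.h>
#include <linux/io.h>

/* Hypothetical descriptor shared with a bus-mastering device. */
struct example_desc {
	u32 addr;
	u32 len;
	u32 flags;		/* bit 31: owned by the device */
};

static void example_kick(struct example_desc *desc, u32 dma_addr, u32 len,
			 void __iomem *doorbell)
{
	desc->addr = dma_addr;
	desc->len = len;
	/*
	 * Order the descriptor writes before the ownership flip: on a
	 * coherent platform this is the dmb() branch of the wmb() above;
	 * on a non-coherent UP build it is a plain barrier().
	 */
	wmb();
	desc->flags = 0x80000000u;
	writel(1, doorbell);	/* hypothetical doorbell register */
}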
diff --git a/trunk/arch/arm/include/asm/memory.h b/trunk/arch/arm/include/asm/memory.h
index 73cf03aa981e..5f6ddcc56452 100644
--- a/trunk/arch/arm/include/asm/memory.h
+++ b/trunk/arch/arm/include/asm/memory.h
@@ -275,6 +275,14 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
 
+/*
+ * Optional coherency support.  Currently used only by selected
+ * Intel XSC3-based systems.
+ */
+#ifndef arch_is_coherent
+#define arch_is_coherent()		0
+#endif
+
 #endif
 
 #include
diff --git a/trunk/arch/arm/mach-highbank/highbank.c b/trunk/arch/arm/mach-highbank/highbank.c
index 93617d61f4b1..d75b0a78d88a 100644
--- a/trunk/arch/arm/mach-highbank/highbank.c
+++ b/trunk/arch/arm/mach-highbank/highbank.c
@@ -15,7 +15,6 @@
  */
 #include
 #include
-#include
 #include
 #include
 #include
@@ -24,7 +23,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
@@ -151,60 +149,10 @@ static void highbank_power_off(void)
 	cpu_do_idle();
 }
 
-static int highbank_platform_notifier(struct notifier_block *nb,
-				      unsigned long event, void *__dev)
-{
-	struct resource *res;
-	int reg = -1;
-	struct device *dev = __dev;
-
-	if (event != BUS_NOTIFY_ADD_DEVICE)
-		return NOTIFY_DONE;
-
-	if (of_device_is_compatible(dev->of_node, "calxeda,hb-ahci"))
-		reg = 0xc;
-	else if (of_device_is_compatible(dev->of_node, "calxeda,hb-sdhci"))
-		reg = 0x18;
-	else if (of_device_is_compatible(dev->of_node, "arm,pl330"))
-		reg = 0x20;
-	else if (of_device_is_compatible(dev->of_node, "calxeda,hb-xgmac")) {
-		res = platform_get_resource(to_platform_device(dev),
-					    IORESOURCE_MEM, 0);
-		if (res) {
-			if (res->start == 0xfff50000)
-				reg = 0;
-			else if (res->start == 0xfff51000)
-				reg = 4;
-		}
-	}
-
-	if (reg < 0)
-		return NOTIFY_DONE;
-
-	if (of_property_read_bool(dev->of_node, "dma-coherent")) {
-		writel(0xff31, sregs_base + reg);
-		set_dma_ops(dev, &arm_coherent_dma_ops);
-	} else
-		writel(0, sregs_base + reg);
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block highbank_amba_nb = {
-	.notifier_call = highbank_platform_notifier,
-};
-
-static struct notifier_block highbank_platform_nb = {
-	.notifier_call = highbank_platform_notifier,
-};
-
 static void __init highbank_init(void)
 {
 	pm_power_off = highbank_power_off;
 
-	bus_register_notifier(&platform_bus_type, &highbank_platform_nb);
-	bus_register_notifier(&amba_bustype, &highbank_amba_nb);
-
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
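With the per-device bus notifier removed above, coherency is once again a platform-wide, compile-time property: asm/memory.h now supplies a default arch_is_coherent() of 0, and a fully coherent platform overrides the macro from its own headers before asm/memory.h picks the default. A sketch of such an override for a hypothetical machine follows; this file does not exist in the tree.

/* arch/arm/mach-example/include/mach/memory.h -- hypothetical */
#ifndef __MACH_EXAMPLE_MEMORY_H
#define __MACH_EXAMPLE_MEMORY_H

/*
 * Every DMA master on this imaginary SoC snoops the CPU caches, so
 * the generic code may skip cache maintenance (dma-mapping.c), keep
 * barriers real (barrier.h), and use shared mappings (mmu.c).
 */
#define arch_is_coherent()	1

#endif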
diff --git a/trunk/arch/arm/mm/dma-mapping.c b/trunk/arch/arm/mm/dma-mapping.c
index de7215c399c1..13f555d62491 100644
--- a/trunk/arch/arm/mm/dma-mapping.c
+++ b/trunk/arch/arm/mm/dma-mapping.c
@@ -73,18 +73,11 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
-static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     struct dma_attrs *attrs)
-{
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
 /**
  * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -103,7 +96,7 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
 				      handle & ~PAGE_MASK, size, dir);
 }
@@ -113,7 +106,8 @@ static void arm_dma_sync_single_for_cpu(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	__dma_page_dev_to_cpu(page, offset, size, dir);
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
 static void arm_dma_sync_single_for_device(struct device *dev,
@@ -121,7 +115,8 @@ static void arm_dma_sync_single_for_device(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	if (!arch_is_coherent())
+		__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
 static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
@@ -143,22 +138,6 @@ struct dma_map_ops arm_dma_ops = {
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
-static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
-	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
-static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
-				  dma_addr_t handle, struct dma_attrs *attrs);
-
-struct dma_map_ops arm_coherent_dma_ops = {
-	.alloc			= arm_coherent_dma_alloc,
-	.free			= arm_coherent_dma_free,
-	.mmap			= arm_dma_mmap,
-	.get_sgtable		= arm_dma_get_sgtable,
-	.map_page		= arm_coherent_dma_map_page,
-	.map_sg			= arm_dma_map_sg,
-	.set_dma_mask		= arm_dma_set_mask,
-};
-EXPORT_SYMBOL(arm_coherent_dma_ops);
-
 static u64 get_coherent_dma_mask(struct device *dev)
 {
 	u64 mask = (u64)arm_dma_limit;
@@ -607,7 +586,7 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
+			 gfp_t gfp, pgprot_t prot, const void *caller)
 {
 	u64 mask = get_coherent_dma_mask(dev);
 	struct page *page;
@@ -640,7 +619,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
 
-	if (is_coherent || nommu())
+	if (arch_is_coherent() || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (gfp & GFP_ATOMIC)
 		addr = __alloc_from_pool(size, &page);
@@ -668,20 +647,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
-	return __dma_alloc(dev, size, handle, gfp, prot, false,
-			   __builtin_return_address(0));
-}
-
-static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
-	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
-{
-	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
-	void *memory;
-
-	if (dma_alloc_from_coherent(dev, size, handle, &memory))
-		return memory;
-
-	return __dma_alloc(dev, size, handle, gfp, prot, true,
+	return __dma_alloc(dev, size, handle, gfp, prot,
 			   __builtin_return_address(0));
 }
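The streaming-mode entry points patched above now share one shape: cache maintenance runs only when the platform is non-coherent and the caller did not pass DMA_ATTR_SKIP_CPU_SYNC. A hedged sketch of the caller's side of that contract, with the hardware helpers invented for illustration:

#include <linux/device.h>
#include <linux/dma-mapping.h>

void example_hw_start(struct device *dev, dma_addr_t handle, size_t len);	/* hypothetical */
void example_hw_wait(struct device *dev);					/* hypothetical */

static int example_xmit(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t handle;

	/* map_page: cleans the CPU cache to DRAM unless coherent */
	handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	example_hw_start(dev, handle, len);
	example_hw_wait(dev);

	/* unmap_page: for DMA_TO_DEVICE this is mostly bookkeeping */
	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}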
@@ -718,9 +684,8 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 
 /*
  * Free a buffer as defined by the above mapping.
  */
-static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
-			   dma_addr_t handle, struct dma_attrs *attrs,
-			   bool is_coherent)
+void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+		  dma_addr_t handle, struct dma_attrs *attrs)
 {
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 
@@ -729,7 +694,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
 	size = PAGE_ALIGN(size);
 
-	if (is_coherent || nommu()) {
+	if (arch_is_coherent() || nommu()) {
 		__dma_free_buffer(page, size);
 	} else if (__free_from_pool(cpu_addr, size)) {
 		return;
@@ -745,18 +710,6 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	}
 }
 
-void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
-		  dma_addr_t handle, struct dma_attrs *attrs)
-{
-	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
-}
-
-static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
-				  dma_addr_t handle, struct dma_attrs *attrs)
-{
-	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
-}
-
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
@@ -1350,8 +1303,7 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
  */
 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
-			  enum dma_data_direction dir, struct dma_attrs *attrs,
-			  bool is_coherent)
+			  enum dma_data_direction dir, struct dma_attrs *attrs)
 {
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova, iova_base;
@@ -1370,8 +1322,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
-		if (!is_coherent &&
-			!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		if (!arch_is_coherent() &&
+		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
		ret = iommu_map(mapping->domain, iova, phys, len, 0);
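The alloc/free hunks above collapse back to a single pair: whether __dma_alloc() takes the simple cacheable-buffer route is decided globally by arch_is_coherent() || nommu() rather than by a second ops table. The consumer-facing contract is unchanged; a minimal usage sketch with invented names:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int example_setup_ring(struct device *dev, void **ring,
			      dma_addr_t *ring_dma)
{
	/*
	 * Lands in arm_dma_alloc() -> __dma_alloc() on ARM; the buffer
	 * is cacheable when arch_is_coherent(), remapped uncached when
	 * not. Either way no explicit sync is needed afterwards.
	 */
	*ring = dma_alloc_coherent(dev, PAGE_SIZE, ring_dma, GFP_KERNEL);
	if (!*ring)
		return -ENOMEM;
	return 0;
}

static void example_teardown_ring(struct device *dev, void *ring,
				  dma_addr_t ring_dma)
{
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
}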
@@ -1389,9 +1341,20 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 	return ret;
 }
 
-static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir, struct dma_attrs *attrs,
-		bool is_coherent)
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *s = sg, *dma = sg, *start = sg;
 	int i, count = 0;
@@ -1407,7 +1370,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
 			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
-			    dir, attrs, is_coherent) < 0)
+			    dir, attrs) < 0)
 				goto bad_mapping;
 
 			dma->dma_address += offset;
@@ -1420,8 +1383,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		}
 		size += s->length;
 	}
-	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
-		is_coherent) < 0)
+	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs) < 0)
 		goto bad_mapping;
 
 	dma->dma_address += offset;
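As the restored kerneldoc above says, arm_iommu_map_sg() merges adjacent scatterlist entries into single IOVA ranges, so callers must iterate over the returned count rather than the submitted nents. A sketch of that round trip; the queueing helper is hypothetical:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

void example_hw_queue(struct device *dev, dma_addr_t addr, unsigned int len);	/* hypothetical */

static int example_map_and_submit(struct device *dev,
				  struct scatterlist *sgl, int nents)
{
	struct scatterlist *s;
	int i, mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -EIO;

	/* Iterate the merged list: 'mapped' may be smaller than nents. */
	for_each_sg(sgl, s, mapped, i)
		example_hw_queue(dev, sg_dma_address(s), sg_dma_len(s));

	/* Unmap with the original nents, per the DMA-API rules. */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}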
@@ -1436,44 +1398,17 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 }
 
 /**
- * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of i/o coherent buffers described by scatterlist in streaming
- * mode for DMA. The scatter gather list elements are merged together (if
- * possible) and tagged with the appropriate dma address and length. They are
- * obtained via sg_dma_{address,length}.
- */
-int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
-{
-	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
-}
-
-/**
- * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
  * @dev: valid struct device pointer
  * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  *
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * The scatter gather list elements are merged together (if possible) and
- * tagged with the appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}.
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
  */
-int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
-{
-	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
-}
-
-static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
-		bool is_coherent)
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+			enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i;
@@ -1482,45 +1417,13 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
 		if (sg_dma_len(s))
 			__iommu_remove_mapping(dev, sg_dma_address(s),
 					       sg_dma_len(s));
-		if (!is_coherent &&
+		if (!arch_is_coherent() &&
 		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 			__dma_page_dev_to_cpu(sg_page(s), s->offset,
 					      s->length, dir);
 	}
 }
 
-/**
- * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- *
- * Unmap a set of streaming mode DMA translations.  Again, CPU access
- * rules concerning calls here are the same as for dma_unmap_single().
- */
-void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
-{
-	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
-}
-
-/**
- * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- *
- * Unmap a set of streaming mode DMA translations.  Again, CPU access
- * rules concerning calls here are the same as for dma_unmap_single().
- */
-void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir, struct dma_attrs *attrs)
-{
-	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
-}
-
 /**
  * arm_iommu_sync_sg_for_cpu
  * @dev: valid struct device pointer
@@ -1535,7 +1438,8 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i)
-		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+		if (!arch_is_coherent())
+			__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
 
 }
 
@@ -1553,21 +1457,22 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i)
-		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+		if (!arch_is_coherent())
+			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 }
 
 /**
- * arm_coherent_iommu_map_page
+ * arm_iommu_map_page
  * @dev: valid struct device pointer
  * @page: page that buffer resides in
  * @offset: offset into page for start of buffer
  * @size: size of buffer to map
 * @dir: DMA transfer direction
  *
- * Coherent IOMMU aware version of arm_dma_map_page()
+ * IOMMU aware version of arm_dma_map_page()
  */
-static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
@@ -1575,6 +1480,9 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	dma_addr_t dma_addr;
 	int ret, len = PAGE_ALIGN(size + offset);
 
+	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
@@ -1589,51 +1497,6 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	return DMA_ERROR_CODE;
 }
 
-/**
- * arm_iommu_map_page
- * @dev: valid struct device pointer
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * IOMMU aware version of arm_dma_map_page()
- */
-static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     struct dma_attrs *attrs)
-{
-	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
-		__dma_page_cpu_to_dev(page, offset, size, dir);
-
-	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
-}
-
-/**
- * arm_coherent_iommu_unmap_page
- * @dev: valid struct device pointer
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Coherent IOMMU aware version of arm_dma_unmap_page()
- */
-static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		struct dma_attrs *attrs)
-{
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-	dma_addr_t iova = handle & PAGE_MASK;
-	int offset = handle & ~PAGE_MASK;
-	int len = PAGE_ALIGN(size + offset);
-
-	if (!iova)
-		return;
-
-	iommu_unmap(mapping->domain, iova, len);
-	__free_iova(mapping, iova, len);
-}
-
 /**
  * arm_iommu_unmap_page
  * @dev: valid struct device pointer
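The hunks that follow apply the same arch_is_coherent() gate to arm_iommu_unmap_page() and the single-buffer syncs. Those sync entry points exist for drivers that keep one mapping alive and bounce buffer ownership between CPU and device; a sketch of that reuse pattern, with the processing helpers invented:

#include <linux/device.h>
#include <linux/dma-mapping.h>

void example_process(void *buf, size_t size);				/* hypothetical */
void example_hw_rearm(struct device *dev, dma_addr_t handle);		/* hypothetical */

static void example_rx_poll(struct device *dev, void *buf,
			    dma_addr_t handle, size_t size)
{
	/*
	 * Give the buffer to the CPU: invalidates stale cache lines on a
	 * non-coherent platform, a no-op when arch_is_coherent().
	 */
	dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);

	example_process(buf, size);

	/* Hand the buffer back to the device before re-arming RX. */
	dma_sync_single_for_device(dev, handle, size, DMA_FROM_DEVICE);
	example_hw_rearm(dev, handle);
}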
@@ -1656,7 +1519,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	if (!iova)
 		return;
 
-	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 
 	iommu_unmap(mapping->domain, iova, len);
@@ -1674,7 +1537,8 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 	if (!iova)
 		return;
 
-	__dma_page_dev_to_cpu(page, offset, size, dir);
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
 static void arm_iommu_sync_single_for_device(struct device *dev,
@@ -1708,19 +1572,6 @@ struct dma_map_ops iommu_ops = {
 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
 };
 
-struct dma_map_ops iommu_coherent_ops = {
-	.alloc		= arm_iommu_alloc_attrs,
-	.free		= arm_iommu_free_attrs,
-	.mmap		= arm_iommu_mmap_attrs,
-	.get_sgtable	= arm_iommu_get_sgtable,
-
-	.map_page	= arm_coherent_iommu_map_page,
-	.unmap_page	= arm_coherent_iommu_unmap_page,
-
-	.map_sg		= arm_coherent_iommu_map_sg,
-	.unmap_sg	= arm_coherent_iommu_unmap_sg,
-};
-
 /**
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
diff --git a/trunk/arch/arm/mm/mmu.c b/trunk/arch/arm/mm/mmu.c
index 8fd039929ae8..c2fa21d0103e 100644
--- a/trunk/arch/arm/mm/mmu.c
+++ b/trunk/arch/arm/mm/mmu.c
@@ -216,7 +216,7 @@ static struct mem_type mem_types[] = {
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 		.domain		= DOMAIN_IO,
-	},
+	},
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
@@ -421,6 +421,17 @@ static void __init build_mem_type_table(void)
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
+	/*
+	 * Enable CPU-specific coherency if supported.
+	 * (Only available on XSC3 at the moment.)
+	 */
+	if (arch_is_coherent() && cpu_is_xsc3()) {
+		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+	}
 
 	/*
 	 * ARMv6 and above have extended page tables.
 	 */
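Returning briefly to dma-mapping.c: with iommu_coherent_ops removed above, a single IOMMU-aware ops table (iommu_ops) remains, and it is installed per device through the ARM IOMMU mapping API, which this patch does not touch. A sketch of that sequence; the IOVA base and size are invented for illustration:

#include <linux/device.h>
#include <linux/err.h>
#include <asm/dma-iommu.h>

static int example_attach_iommu(struct bus_type *bus, struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	/* 128 MiB of IOVA space starting at 0x80000000, order-0 pages. */
	mapping = arm_iommu_create_mapping(bus, 0x80000000, 0x08000000, 0);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* Installs iommu_ops (the table kept above) as dev's dma_map_ops. */
	return arm_iommu_attach_device(dev, mapping);
}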
@@ -766,8 +777,8 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(md);
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
-		vm->phys_addr = __pfn_to_phys(md->pfn);
-		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+		vm->phys_addr = __pfn_to_phys(md->pfn);
+		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
 		vm_area_add_early(vm++);
diff --git a/trunk/drivers/base/dma-contiguous.c b/trunk/drivers/base/dma-contiguous.c
index 34d94c762a1e..9a1469474f55 100644
--- a/trunk/drivers/base/dma-contiguous.c
+++ b/trunk/drivers/base/dma-contiguous.c
@@ -315,6 +315,7 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 {
 	unsigned long mask, pfn, pageno, start = 0;
 	struct cma *cma = dev_get_cma_area(dev);
+	struct page *page = NULL;
 	int ret;
 
 	if (!cma || !cma->count)
@@ -336,18 +337,17 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 	for (;;) {
 		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
 						    start, count, mask);
-		if (pageno >= cma->count) {
-			ret = -ENOMEM;
-			goto error;
-		}
+		if (pageno >= cma->count)
+			break;
 
 		pfn = cma->base_pfn + pageno;
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
 		if (ret == 0) {
 			bitmap_set(cma->bitmap, pageno, count);
+			page = pfn_to_page(pfn);
 			break;
 		} else if (ret != -EBUSY) {
-			goto error;
+			break;
 		}
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
@@ -356,12 +356,8 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 	}
 	mutex_unlock(&cma_mutex);
-
-	pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
-	return pfn_to_page(pfn);
-error:
-	mutex_unlock(&cma_mutex);
-	return NULL;
+	pr_debug("%s(): returned %p\n", __func__, page);
+	return page;
 }
 
 /**
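The dma-contiguous.c rewrite above replaces the error label with a single exit: page stays NULL unless alloc_contig_range() succeeds, and every path, success, exhaustion, or hard error, funnels through one mutex_unlock(). The same control-flow shape, reduced to a standalone sketch with hypothetical helpers standing in for the CMA internals:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

/* Hypothetical stand-ins for alloc_contig_range()/pfn_to_page(). */
int example_try_claim(unsigned long slot, int count);
struct page *example_slot_to_page(unsigned long slot);

static struct page *example_alloc(unsigned long nr_slots, int count)
{
	struct page *page = NULL;	/* NULL until proven otherwise */
	unsigned long slot = 0;
	int ret;

	mutex_lock(&example_lock);
	while (slot + count <= nr_slots) {
		ret = example_try_claim(slot, count);
		if (ret == 0) {
			page = example_slot_to_page(slot);
			break;			/* success */
		}
		if (ret != -EBUSY)
			break;			/* hard error: page stays NULL */
		slot += count;			/* busy: retry further along */
	}
	mutex_unlock(&example_lock);		/* the one and only unlock */
	return page;
}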