Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux; tag 'dma-mapping-5.5' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - improve dma-debug scalability (Eric Dumazet)

 - tiny dma-debug cleanup (Dan Carpenter)

 - check for vmap memory in dma_map_single (Kees Cook); see the sketch after this list

 - check for dma_addr_t overflows in dma-direct when using DMA offsets
   (Nicolas Saenz Julienne)

 - switch the x86 sta2x11 SOC to use more generic DMA code (Nicolas
   Saenz Julienne)

 - fix arm-nommu dma-ranges handling (Vladimir Murzin)

 - use __initdata in CMA (Shyam Saini)

 - replace the bus dma mask with a limit (Nicolas Saenz Julienne)

 - merge the remapping helpers into the main dma-direct flow (me)

 - switch xtensa to the generic dma remap handling (me)

 - various cleanups around dma_capable (me)

 - remove unused dev arguments to various dma-noncoherent helpers (me)
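
For the vmap check called out above, the guard sits in dma_map_single_attrs() in include/linux/dma-mapping.h. A rough sketch of the resulting inline helper, reconstructed from memory of the series rather than copied verbatim, so the exact warning text may differ:

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        /* DMA must never operate on memory that might be remapped (vmap/vmalloc) */
        if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
                          "rejecting DMA map of vmalloc memory\n"))
                return DMA_MAPPING_ERROR;

        debug_dma_map_single(dev, ptr, size);
        return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
                        size, dir, attrs);
}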

* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux:

* tag 'dma-mapping-5.5' of git://git.infradead.org/users/hch/dma-mapping: (22 commits)
  dma-mapping: treat dev->bus_dma_mask as a DMA limit
  dma-direct: exclude dma_direct_map_resource from the min_low_pfn check
  dma-direct: don't check swiotlb=force in dma_direct_map_resource
  dma-debug: clean up put_hash_bucket()
  powerpc: remove support for NULL dev in __phys_to_dma / __dma_to_phys
  dma-direct: avoid a forward declaration for phys_to_dma
  dma-direct: unify the dma_capable definitions
  dma-mapping: drop the dev argument to arch_sync_dma_for_*
  x86/PCI: sta2x11: use default DMA address translation
  dma-direct: check for overflows on 32 bit DMA addresses
  dma-debug: increase HASH_SIZE
  dma-debug: reorder struct dma_debug_entry fields
  xtensa: use the generic uncached segment support
  dma-mapping: merge the generic remapping helpers into dma-direct
  dma-direct: provide mmap and get_sgtable method overrides
  dma-direct: remove the dma_handle argument to __dma_direct_alloc_pages
  dma-direct: remove __dma_direct_free_pages
  usb: core: Remove redundant vmap checks
  kernel: dma-contiguous: mark CMA parameters __initdata/__initconst
  dma-debug: add a schedule point in debug_dma_dump_mappings()
  ...
Linus Torvalds committed Nov 28, 2019
2 parents a308a71 + a7ba70f commit 81b6b96
Showing 64 changed files with 403 additions and 670 deletions.
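
Most of the per-architecture hunks below are mechanical fallout of the "drop the dev argument to arch_sync_dma_for_*" change. For orientation, after this series the prototypes in include/linux/dma-noncoherent.h should look roughly like the following sketch (paraphrased, not copied from the tree):

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir);
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir);
void arch_sync_dma_for_cpu_all(void);
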
arch/arc/Kconfig (1 change: 0 additions & 1 deletion)
@@ -6,7 +6,6 @@
 config ARC
         def_bool y
         select ARC_TIMERS
-        select ARCH_HAS_DMA_COHERENT_TO_PFN
         select ARCH_HAS_DMA_PREP_COHERENT
         select ARCH_HAS_PTE_SPECIAL
         select ARCH_HAS_SETUP_DMA_OPS
arch/arc/mm/dma.c (8 changes: 4 additions & 4 deletions)
@@ -48,8 +48,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
  * upper layer functions (in include/linux/dma-mapping.h)
  */
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-                size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
         switch (dir) {
         case DMA_TO_DEVICE:
@@ -69,8 +69,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
         }
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-                size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
         switch (dir) {
         case DMA_TO_DEVICE:
arch/arm/Kconfig (1 change: 0 additions & 1 deletion)
@@ -7,7 +7,6 @@ config ARM
         select ARCH_HAS_BINFMT_FLAT
         select ARCH_HAS_DEBUG_VIRTUAL if MMU
         select ARCH_HAS_DEVMEM_IS_ALLOWED
-        select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
         select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE
         select ARCH_HAS_ELF_RANDOMIZE
         select ARCH_HAS_FORTIFY_SOURCE
arch/arm/include/asm/dma-direct.h (19 changes: 0 additions & 19 deletions)
@@ -14,23 +14,4 @@ static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
         return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
 }
 
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-        u64 limit, mask;
-
-        if (!dev->dma_mask)
-                return 0;
-
-        mask = *dev->dma_mask;
-
-        limit = (mask + 1) & ~mask;
-        if (limit && size > limit)
-                return 0;
-
-        if ((addr | (addr + size - 1)) & ~mask)
-                return 0;
-
-        return 1;
-}
-
 #endif /* ASM_ARM_DMA_DIRECT_H */
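
With the ARM-private copy removed, ARM now uses the common dma_capable() helper from include/linux/dma-direct.h, which this series also reworks around the new dev->bus_dma_limit and a 32-bit overflow check. A rough sketch of the generic helper as it should look after the series (an approximation, not a verbatim copy):

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
        dma_addr_t end = addr + size - 1;

        if (!dev->dma_mask)
                return false;

        /* catch dma_addr_t overflows when DMA offsets are in use on 32-bit */
        if (!IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
            min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
                return false;

        /* dev->bus_dma_limit is the "limit" replacement for the old bus_dma_mask */
        return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}
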
arch/arm/mm/dma-mapping-nommu.c (2 changes: 1 addition & 1 deletion)
@@ -35,7 +35,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
                                  unsigned long attrs)
 
 {
-        void *ret = dma_alloc_from_global_coherent(size, dma_handle);
+        void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
 
         /*
          * dma_alloc_from_global_coherent() may fail because:
arch/arm/mm/dma-mapping.c (14 changes: 4 additions & 10 deletions)
@@ -2332,26 +2332,20 @@ void arch_teardown_dma_ops(struct device *dev)
 }
 
 #ifdef CONFIG_SWIOTLB
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-                size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
         __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
                               size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-                size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
         __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
                               size, dir);
 }
 
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
-                dma_addr_t dma_addr)
-{
-        return dma_to_pfn(dev, dma_addr);
-}
-
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                 gfp_t gfp, unsigned long attrs)
 {
arch/arm/xen/mm.c (12 changes: 6 additions & 6 deletions)
@@ -71,20 +71,20 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
  * pfn_valid returns true the pages is local and we can use the native
  * dma-direct functions, otherwise we call the Xen specific version.
  */
-void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
-                phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
         if (pfn_valid(PFN_DOWN(handle)))
-                arch_sync_dma_for_cpu(dev, paddr, size, dir);
+                arch_sync_dma_for_cpu(paddr, size, dir);
         else if (dir != DMA_TO_DEVICE)
                 dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
 }
 
-void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
-                phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
         if (pfn_valid(PFN_DOWN(handle)))
-                arch_sync_dma_for_device(dev, paddr, size, dir);
+                arch_sync_dma_for_device(paddr, size, dir);
         else if (dir == DMA_FROM_DEVICE)
                 dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
         else
arch/arm64/Kconfig (1 change: 0 additions & 1 deletion)
@@ -12,7 +12,6 @@ config ARM64
         select ARCH_CLOCKSOURCE_DATA
         select ARCH_HAS_DEBUG_VIRTUAL
         select ARCH_HAS_DEVMEM_IS_ALLOWED
-        select ARCH_HAS_DMA_COHERENT_TO_PFN
         select ARCH_HAS_DMA_PREP_COHERENT
         select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
         select ARCH_HAS_FAST_MULTIPLIER
arch/arm64/mm/dma-mapping.c (8 changes: 4 additions & 4 deletions)
@@ -13,14 +13,14 @@
 
 #include <asm/cacheflush.h>
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-                size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
         __dma_map_area(phys_to_virt(paddr), size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-                size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
         __dma_unmap_area(phys_to_virt(paddr), size, dir);
 }
arch/c6x/mm/dma-coherent.c (14 changes: 7 additions & 7 deletions)
@@ -140,7 +140,7 @@ void __init coherent_mem_init(phys_addr_t start, u32 size)
                                               sizeof(long));
 }
 
-static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+static void c6x_dma_sync(phys_addr_t paddr, size_t size,
                 enum dma_data_direction dir)
 {
         BUG_ON(!valid_dma_direction(dir));
@@ -160,14 +160,14 @@ static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
         }
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-                size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
-        return c6x_dma_sync(dev, paddr, size, dir);
+        return c6x_dma_sync(paddr, size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-                size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
-        return c6x_dma_sync(dev, paddr, size, dir);
+        return c6x_dma_sync(paddr, size, dir);
 }
arch/csky/mm/dma-mapping.c (8 changes: 4 additions & 4 deletions)
@@ -58,8 +58,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
         cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-                size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
         switch (dir) {
         case DMA_TO_DEVICE:
@@ -74,8 +74,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
         }
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-                size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
         switch (dir) {
         case DMA_TO_DEVICE:
arch/hexagon/kernel/dma.c (4 changes: 2 additions & 2 deletions)
@@ -55,8 +55,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
         gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-                size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
         void *addr = phys_to_virt(paddr);
 
arch/ia64/Kconfig (2 changes: 1 addition & 1 deletion)
@@ -33,7 +33,7 @@ config IA64
         select HAVE_ARCH_TRACEHOOK
         select HAVE_MEMBLOCK_NODE_MAP
         select HAVE_VIRT_CPU_ACCOUNTING
-        select ARCH_HAS_DMA_COHERENT_TO_PFN
+        select DMA_NONCOHERENT_MMAP
         select ARCH_HAS_SYNC_DMA_FOR_CPU
         select VIRT_TO_BUS
         select GENERIC_IRQ_PROBE
arch/ia64/kernel/dma-mapping.c (6 changes: 0 additions & 6 deletions)
@@ -19,9 +19,3 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 {
         dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
 }
-
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
-                dma_addr_t dma_addr)
-{
-        return page_to_pfn(virt_to_page(cpu_addr));
-}
arch/ia64/mm/init.c (4 changes: 2 additions & 2 deletions)
@@ -73,8 +73,8 @@ __ia64_sync_icache_dcache (pte_t pte)
  * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
  * flush them when they get mapped into an executable vm-area.
  */
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-                size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
         unsigned long pfn = PHYS_PFN(paddr);
 
arch/m68k/kernel/dma.c (4 changes: 2 additions & 2 deletions)
@@ -61,8 +61,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 
 #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
-                size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t handle, size_t size,
+                enum dma_data_direction dir)
 {
         switch (dir) {
         case DMA_BIDIRECTIONAL:
arch/microblaze/Kconfig (1 change: 0 additions & 1 deletion)
@@ -4,7 +4,6 @@ config MICROBLAZE
         select ARCH_32BIT_OFF_T
         select ARCH_NO_SWAP
         select ARCH_HAS_BINFMT_FLAT if !MMU
-        select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU
         select ARCH_HAS_DMA_PREP_COHERENT
         select ARCH_HAS_GCOV_PROFILE_ALL
         select ARCH_HAS_SYNC_DMA_FOR_CPU
arch/microblaze/kernel/dma.c (14 changes: 7 additions & 7 deletions)
@@ -15,7 +15,7 @@
 #include <linux/bug.h>
 #include <asm/cacheflush.h>
 
-static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+static void __dma_sync(phys_addr_t paddr, size_t size,
                 enum dma_data_direction direction)
 {
         switch (direction) {
@@ -31,14 +31,14 @@ static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
         }
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-                size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
-        __dma_sync(dev, paddr, size, dir);
+        __dma_sync(paddr, size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-                size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+                enum dma_data_direction dir)
 {
-        __dma_sync(dev, paddr, size, dir);
+        __dma_sync(paddr, size, dir);
 }
arch/mips/Kconfig (4 changes: 2 additions & 2 deletions)
@@ -1194,9 +1194,9 @@ config DMA_NONCOHERENT
         select ARCH_HAS_DMA_WRITE_COMBINE
         select ARCH_HAS_SYNC_DMA_FOR_DEVICE
         select ARCH_HAS_UNCACHED_SEGMENT
-        select NEED_DMA_MAP_STATE
-        select ARCH_HAS_DMA_COHERENT_TO_PFN
+        select DMA_NONCOHERENT_MMAP
         select DMA_NONCOHERENT_CACHE_SYNC
+        select NEED_DMA_MAP_STATE
 
 config SYS_HAS_EARLY_PRINTK
         bool
arch/mips/bmips/dma.c (2 changes: 1 addition & 1 deletion)
@@ -64,7 +64,7 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
         return dma_addr;
 }
 
-void arch_sync_dma_for_cpu_all(struct device *dev)
+void arch_sync_dma_for_cpu_all(void)
 {
         void __iomem *cbr = BMIPS_GET_CBR();
         u32 cfg;
arch/mips/include/asm/dma-direct.h (8 changes: 0 additions & 8 deletions)
@@ -2,14 +2,6 @@
 #ifndef _MIPS_DMA_DIRECT_H
 #define _MIPS_DMA_DIRECT_H 1
 
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-        if (!dev->dma_mask)
-                return false;
-
-        return addr + size - 1 <= *dev->dma_mask;
-}
-
 dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
 phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
 