Merge tag 'dma-mapping-6.6-2023-09-30' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - fix the narea calculation in swiotlb initialization (Ross Lagerwall)

 - fix the check whether a device has used swiotlb (Petr Tesarik)

* tag 'dma-mapping-6.6-2023-09-30' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: fix the check whether a device has used software IO TLB
  swiotlb: use the calculated number of areas
Linus Torvalds committed Sep 30, 2023
2 parents 25d48d5 + 2d5780b commit 3b51796
Showing 2 changed files with 38 additions and 16 deletions.
23 changes: 16 additions & 7 deletions include/linux/swiotlb.h
@@ -172,14 +172,23 @@ static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
         if (!mem)
                 return false;
 
-        if (IS_ENABLED(CONFIG_SWIOTLB_DYNAMIC)) {
-                /* Pairs with smp_wmb() in swiotlb_find_slots() and
-                 * swiotlb_dyn_alloc(), which modify the RCU lists.
-                 */
-                smp_rmb();
-                return swiotlb_find_pool(dev, paddr);
-        }
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+        /*
+         * All SWIOTLB buffer addresses must have been returned by
+         * swiotlb_tbl_map_single() and passed to a device driver.
+         * If a SWIOTLB address is checked on another CPU, then it was
+         * presumably loaded by the device driver from an unspecified private
+         * data structure. Make sure that this load is ordered before reading
+         * dev->dma_uses_io_tlb here and mem->pools in swiotlb_find_pool().
+         *
+         * This barrier pairs with smp_mb() in swiotlb_find_slots().
+         */
+        smp_rmb();
+        return READ_ONCE(dev->dma_uses_io_tlb) &&
+                swiotlb_find_pool(dev, paddr);
+#else
         return paddr >= mem->defpool.start && paddr < mem->defpool.end;
+#endif
 }
 
 static inline bool is_swiotlb_force_bounce(struct device *dev)
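The reader-side pairing above is easier to see in isolation. Below is a minimal userspace sketch using C11 atomics rather than the kernel primitives; the names (uses_io_tlb, pool_start, pool_end, addr_is_bounce_buffer) are made up for illustration and do not exist in the kernel. The acquire fence plays the role of smp_rmb(): the address the caller obtained from a shared, driver-private location is ordered before the loads of the flag and the pool bounds.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for dev->dma_uses_io_tlb and a single pool. */
static atomic_bool uses_io_tlb;
static atomic_uintptr_t pool_start, pool_end;

/* Rough analogue of is_swiotlb_buffer(): paddr was loaded by the caller
 * from some shared structure filled in by another thread. */
static bool addr_is_bounce_buffer(uintptr_t paddr)
{
        /* Analogue of smp_rmb(): order that earlier load of paddr before
         * the loads of the flag and the pool bounds below. */
        atomic_thread_fence(memory_order_acquire);

        if (!atomic_load_explicit(&uses_io_tlb, memory_order_relaxed))
                return false;
        return paddr >= atomic_load_explicit(&pool_start, memory_order_relaxed) &&
               paddr < atomic_load_explicit(&pool_end, memory_order_relaxed);
}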
31 changes: 22 additions & 9 deletions kernel/dma/swiotlb.c
@@ -399,14 +399,13 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
         }
 
         mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
-                default_nareas), SMP_CACHE_BYTES);
+                nareas), SMP_CACHE_BYTES);
         if (!mem->areas) {
                 pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
                 return;
         }
 
-        swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false,
-                                 default_nareas);
+        swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas);
         add_mem_pool(&io_tlb_default_mem, mem);
 
         if (flags & SWIOTLB_VERBOSE)
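The hunk above is small but easy to misread in a flattened diff: swiotlb_init_remap() has already calculated a clamped area count (nareas), yet the array allocation and the pool initialization still used default_nareas. A hedged sketch of the general pattern, with hypothetical names rather than kernel code: whatever count was actually calculated must be used both for sizing the array and for initializing it.

#include <stdlib.h>

struct area { unsigned int used; };

/* Hypothetical sketch: size and initialize with the same calculated
 * count, never with the default the calculation started from. */
static struct area *alloc_areas(unsigned int calculated_count)
{
        struct area *areas = calloc(calculated_count, sizeof(*areas));

        if (!areas)
                return NULL;
        for (unsigned int i = 0; i < calculated_count; i++)
                areas[i].used = 0;      /* init exactly what was allocated */
        return areas;
}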
@@ -729,9 +728,6 @@ static void swiotlb_dyn_alloc(struct work_struct *work)
         }
 
         add_mem_pool(mem, pool);
-
-        /* Pairs with smp_rmb() in is_swiotlb_buffer(). */
-        smp_wmb();
 }
 
 /**
@@ -1152,9 +1148,26 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
         spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
 
 found:
-        dev->dma_uses_io_tlb = true;
-        /* Pairs with smp_rmb() in is_swiotlb_buffer() */
-        smp_wmb();
+        WRITE_ONCE(dev->dma_uses_io_tlb, true);
+
+        /*
+         * The general barrier orders reads and writes against a presumed store
+         * of the SWIOTLB buffer address by a device driver (to a driver private
+         * data structure). It serves two purposes.
+         *
+         * First, the store to dev->dma_uses_io_tlb must be ordered before the
+         * presumed store. This guarantees that the returned buffer address
+         * cannot be passed to another CPU before updating dev->dma_uses_io_tlb.
+         *
+         * Second, the load from mem->pools must be ordered before the same
+         * presumed store. This guarantees that the returned buffer address
+         * cannot be observed by another CPU before an update of the RCU list
+         * that was made by swiotlb_dyn_alloc() on a third CPU (cf. multicopy
+         * atomicity).
+         *
+         * See also the comment in is_swiotlb_buffer().
+         */
+        smp_mb();
 
         *retpool = pool;
         return index;
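Continuing the userspace sketch after the first hunk (same hypothetical uses_io_tlb flag; publish_bounce_buffer and shared_addr are also invented names), the writer side mirrors WRITE_ONCE() followed by smp_mb(): the flag is set, a full fence follows, and only then may the address reach another thread through a shared location.

/* Continues the reader-side sketch above; shared_addr stands in for a
 * driver-private slot another thread will read the address from. */
static atomic_uintptr_t shared_addr;

static void publish_bounce_buffer(uintptr_t paddr)
{
        /* Analogue of WRITE_ONCE(dev->dma_uses_io_tlb, true). */
        atomic_store_explicit(&uses_io_tlb, true, memory_order_relaxed);

        /* Analogue of smp_mb(): the flag store (and any earlier pool-list
         * reads) are ordered before the store that publishes paddr. */
        atomic_thread_fence(memory_order_seq_cst);

        atomic_store_explicit(&shared_addr, paddr, memory_order_relaxed);
}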
