Skip to content

Commit

Permalink
swiotlb: Add restricted DMA pool initialization
Browse files Browse the repository at this point in the history
Add the initialization function to create restricted DMA pools from
matching reserved-memory nodes.

Regardless of swiotlb setting, the restricted DMA pool is preferred if
available.

The restricted DMA pools provide a basic level of protection against the
DMA overwriting buffer contents at unexpected times. However, to protect
against general data leakage and system memory corruption, the system
needs to provide a way to lock down the memory access, e.g., MPU.

Signed-off-by: Claire Chang <tientzu@chromium.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Stefano Stabellini <sstabellini@kernel.org>
Tested-by: Will Deacon <will@kernel.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
  • Loading branch information
Claire Chang authored and Konrad Rzeszutek Wilk committed Jun 24, 2021
1 parent 6079c4f commit 4610218
Show file tree
Hide file tree
Showing 3 changed files with 92 additions and 1 deletion.
3 changes: 2 additions & 1 deletion include/linux/swiotlb.h
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,8 @@ extern enum swiotlb_force swiotlb_force;
* range check to see if the memory was in fact allocated by this
* API.
* @nslabs: The number of IO TLB blocks (in groups of 64) between @start and
* @end. This is command line adjustable via setup_io_tlb_npages.
* @end. For default swiotlb, this is command line adjustable via
* setup_io_tlb_npages.
* @used: The number of used IO TLB block.
* @list: The free list describing the number of free entries available
* from each index.
Expand Down
14 changes: 14 additions & 0 deletions kernel/dma/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,20 @@ config SWIOTLB
bool
select NEED_DMA_MAP_STATE

# Restricted DMA pools: per-device swiotlb bounce-buffer pools carved out of
# devicetree reserved-memory nodes (see rmem_swiotlb_setup() in
# kernel/dma/swiotlb.c).  Requires OF reserved-memory support and pulls in
# the core SWIOTLB machinery.
config DMA_RESTRICTED_POOL
	bool "DMA Restricted Pool"
	depends on OF && OF_RESERVED_MEM
	select SWIOTLB
	help
	  This enables support for restricted DMA pools which provide a level of
	  DMA memory protection on systems with limited hardware protection
	  capabilities, such as those lacking an IOMMU.

	  For more information see
	  <Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt>
	  and <kernel/dma/swiotlb.c>.
	  If unsure, say "n".

#
# Should be selected if we can mmap non-coherent mappings to userspace.
# The only thing that is really required is a way to set an uncached bit
Expand Down
76 changes: 76 additions & 0 deletions kernel/dma/swiotlb.c
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,13 @@
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
#ifdef CONFIG_DMA_RESTRICTED_POOL
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#endif

#include <asm/io.h>
#include <asm/dma.h>
Expand Down Expand Up @@ -735,4 +742,73 @@ bool swiotlb_free(struct device *dev, struct page *page, size_t size)
return true;
}

/*
 * rmem_swiotlb_device_init() - attach @dev to the restricted DMA pool
 * backed by the reserved-memory region @rmem.
 *
 * Invoked through rmem_swiotlb_ops.device_init when a device's devicetree
 * node references a "restricted-dma-pool" region.  Lazily creates the
 * io_tlb_mem for the region on first attach, then points the device's
 * dma_io_tlb_mem at it.
 *
 * Return: 0 on success, -ENOMEM if the io_tlb_mem allocation fails.
 */
static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
				    struct device *dev)
{
	struct io_tlb_mem *mem = rmem->priv;
	/* Whole reserved region is carved into IO_TLB_SIZE slots. */
	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

	/*
	 * Since multiple devices can share the same pool, the private data,
	 * io_tlb_mem struct, will be initialized by the first device attached
	 * to it.
	 */
	if (!mem) {
		/* io_tlb_mem has a flexible array of nslabs slot entries. */
		mem = kzalloc(struct_size(mem, slots, nslabs), GFP_KERNEL);
		if (!mem)
			return -ENOMEM;

		/*
		 * NOTE(review): set_memory_decrypted() can fail on some
		 * platforms but its return value is ignored here — confirm
		 * failure is impossible/benign for this config, or the pool
		 * may be published with still-encrypted backing memory.
		 */
		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
				     rmem->size >> PAGE_SHIFT);
		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
		/*
		 * Restricted pools bounce unconditionally and also back
		 * dma_alloc_*() for attached devices.
		 */
		mem->force_bounce = true;
		mem->for_alloc = true;

		/* Publish so later devices sharing the node reuse this pool. */
		rmem->priv = mem;

		if (IS_ENABLED(CONFIG_DEBUG_FS)) {
			mem->debugfs =
				debugfs_create_dir(rmem->name, debugfs_dir);
			swiotlb_create_debugfs_files(mem);
		}
	}

	dev->dma_io_tlb_mem = mem;

	return 0;
}

/*
 * rmem_swiotlb_device_release() - detach @dev from the restricted pool.
 *
 * Restores the device to the global default swiotlb pool.  The shared
 * io_tlb_mem in rmem->priv is intentionally left allocated: other devices
 * may still be attached to it.
 */
static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
					struct device *dev)
{
	dev->dma_io_tlb_mem = io_tlb_default_mem;
}

/* Attach/detach callbacks installed on "restricted-dma-pool" regions. */
static const struct reserved_mem_ops rmem_swiotlb_ops = {
	.device_init = rmem_swiotlb_device_init,
	.device_release = rmem_swiotlb_device_release,
};

/*
 * rmem_swiotlb_setup() - early-boot handler for "restricted-dma-pool"
 * reserved-memory nodes.
 *
 * Rejects nodes with properties incompatible with a bounce-buffer pool:
 * "reusable" / "linux,cma-default" / "linux,dma-default" (region is managed
 * by other allocators) and "no-map" (region would lack a kernel mapping,
 * but phys_to_virt() at attach time requires one).  Highmem regions are
 * rejected for the same linear-mapping reason.  On success, installs
 * rmem_swiotlb_ops so devices can attach later.
 *
 * Return: 0 on success, -EINVAL if the node is unsuitable.
 */
static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
		/* Trailing newline added: kernel log lines must end in \n. */
		pr_err("Restricted DMA pool must be accessible within the linear mapping.\n");
		return -EINVAL;
	}

	rmem->ops = &rmem_swiotlb_ops;
	/* %lu, not %ld: the size argument is cast to unsigned long. */
	pr_info("Reserved memory: created restricted DMA pool at %pa, size %lu MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
#endif /* CONFIG_DMA_RESTRICTED_POOL */

0 comments on commit 4610218

Please sign in to comment.