Merge branch 'stable/for-linus-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb

Pull swiotlb updates from Konrad Rzeszutek Wilk:
 "A new feature called restricted DMA pools. It allows SWIOTLB to
  utilize per-device (or per-platform) allocated memory pools instead of
  using the global one.

  The first big user of this is ARM Confidential Computing where the
  memory for DMA operations can be set per platform"

* 'stable/for-linus-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb: (23 commits)
  swiotlb: use depends on for DMA_RESTRICTED_POOL
  of: restricted dma: Don't fail device probe on rmem init failure
  of: Move of_dma_set_restricted_buffer() into device.c
  powerpc/svm: Don't issue ultracalls if !mem_encrypt_active()
  s390/pv: fix the forcing of the swiotlb
  swiotlb: Free tbl memory in swiotlb_exit()
  swiotlb: Emit diagnostic in swiotlb_exit()
  swiotlb: Convert io_default_tlb_mem to static allocation
  of: Return success from of_dma_set_restricted_buffer() when !OF_ADDRESS
  swiotlb: add overflow checks to swiotlb_bounce
  swiotlb: fix implicit debugfs declarations
  of: Add plumbing for restricted DMA pool
  dt-bindings: of: Add restricted DMA pool
  swiotlb: Add restricted DMA pool initialization
  swiotlb: Add restricted DMA alloc/free support
  swiotlb: Refactor swiotlb_tbl_unmap_single
  swiotlb: Move alloc_size to swiotlb_find_slots
  swiotlb: Use is_swiotlb_force_bounce for swiotlb data bouncing
  swiotlb: Update is_swiotlb_active to add a struct device argument
  swiotlb: Update is_swiotlb_buffer to add a struct device argument
  ...
Linus Torvalds committed Sep 3, 2021
2 parents 1472690 + f3c4b13 commit 3de18c8
Showing 16 changed files with 469 additions and 136 deletions.
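The common thread in these diffs is that SWIOTLB state moves from one global table to a per-device pointer: device_initialize() now points dev->dma_io_tlb_mem at io_tlb_default_mem, and the restricted-DMA plumbing can later repoint it at a per-device pool. A minimal sketch of the resulting helpers (not part of this diff; simplified from the swiotlb headers of this release, assuming struct io_tlb_mem carries start, end and force_bounce fields):

/*
 * Simplified per-device SWIOTLB helpers after this series.  CONFIG_SWIOTLB=n
 * stubs and slot bookkeeping are omitted.
 */
struct io_tlb_mem {
	phys_addr_t start;	/* first byte of the bounce pool */
	phys_addr_t end;	/* one past the last byte */
	bool force_bounce;	/* set for restricted-dma-pool devices */
	/* ... slot bookkeeping elided ... */
};

/* A bounce buffer belongs to whichever pool the device points at. */
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && paddr >= mem->start && paddr < mem->end;
}

/* Replaces global "swiotlb_force == SWIOTLB_FORCE" checks: a device bound
 * to a restricted pool must bounce all of its streaming DMA. */
static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->force_bounce;
}

This is why callers such as dma-iommu, xen-swiotlb and the i915/nouveau drivers below now pass a struct device into these helpers.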
@@ -51,6 +51,23 @@ compatible (optional) - standard definition
used as a shared pool of DMA buffers for a set of devices. It can
be used by an operating system to instantiate the necessary pool
management subsystem if necessary.
- restricted-dma-pool: This indicates a region of memory meant to be
used as a pool of restricted DMA buffers for a set of devices. The
memory region would be the only region accessible to those devices.
When using this, the no-map and reusable properties must not be set,
so the operating system can create a virtual mapping that will be used
for synchronization. The main purpose for restricted DMA is to
mitigate the lack of DMA access control on systems without an IOMMU,
which could result in the DMA accessing the system memory at
unexpected times and/or unexpected addresses, possibly leading to data
leakage or corruption. The feature on its own provides a basic level
of protection against the DMA overwriting buffer contents at
unexpected times. However, to protect against general data leakage and
system memory corruption, the system needs to provide a way to lock down
the memory access, e.g., MPU. Note that since coherent allocation
needs remapping, one must set up another device coherent pool by
shared-dma-pool and use dma_alloc_from_dev_coherent instead for atomic
coherent allocation.
- vendor specific string in the form <vendor>,[<device>-]<usage>
no-map (optional) - empty property
- Indicates the operating system must not create a virtual mapping
@@ -85,10 +102,11 @@ memory-region-names (optional) - a list of names, one for each corresponding

Example
-------
This example defines 3 contiguous regions are defined for Linux kernel:
This example defines 4 contiguous regions for Linux kernel:
one default of all device drivers (named linux,cma@72000000 and 64MiB in size),
one dedicated to the framebuffer device (named framebuffer@78000000, 8MiB), and
one for multimedia processing (named multimedia-memory@77000000, 64MiB).
one dedicated to the framebuffer device (named framebuffer@78000000, 8MiB),
one for multimedia processing (named multimedia-memory@77000000, 64MiB), and
one for restricted dma pool (named restricted_dma_reserved@0x50000000, 64MiB).

/ {
#address-cells = <1>;
@@ -120,6 +138,11 @@ one for multimedia processing (named multimedia-memory@77000000, 64MiB).
compatible = "acme,multimedia-memory";
reg = <0x77000000 0x4000000>;
};

restricted_dma_reserved: restricted_dma_reserved {
compatible = "restricted-dma-pool";
reg = <0x50000000 0x4000000>;
};
};

/* ... */
@@ -138,4 +161,11 @@ one for multimedia processing (named multimedia-memory@77000000, 64MiB).
memory-region = <&multimedia_reserved>;
/* ... */
};

pcie_device: pcie_device@0,0 {
reg = <0x83010000 0x0 0x00000000 0x0 0x00100000
0x83010000 0x0 0x00100000 0x0 0x00100000>;
memory-region = <&restricted_dma_reserved>;
/* ... */
};
};
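The binding text added above spells out the model: a device wired to a restricted-dma-pool can only reach that region, so all of its streaming DMA to ordinary kernel memory has to be bounced through it, while atomic coherent allocations need a separate shared-dma-pool coherent region. A hypothetical driver fragment (demo_dma_tx and demo_dev are made-up names for illustration) showing the streaming side:

#include <linux/dma-mapping.h>

/* Hypothetical fragment: demo_dev is a device such as the pcie_device node
 * above, bound to restricted_dma_reserved via its memory-region phandle. */
static int demo_dma_tx(struct device *demo_dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/*
	 * buf lives in normal kernel memory the device cannot reach, so
	 * dma_map_single() bounces it into the restricted pool and returns
	 * a handle inside the reserved region.
	 */
	dma = dma_map_single(demo_dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(demo_dev, dma))
		return -ENOMEM;

	/* ... start the device transfer using "dma" ... */

	dma_unmap_single(demo_dev, dma, len, DMA_TO_DEVICE);
	return 0;
}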
6 changes: 6 additions & 0 deletions arch/powerpc/platforms/pseries/svm.c
@@ -63,6 +63,9 @@ void __init svm_swiotlb_init(void)

int set_memory_encrypted(unsigned long addr, int numpages)
{
if (!mem_encrypt_active())
return 0;

if (!PAGE_ALIGNED(addr))
return -EINVAL;

@@ -73,6 +76,9 @@ int set_memory_encrypted(unsigned long addr, int numpages)

int set_memory_decrypted(unsigned long addr, int numpages)
{
if (!mem_encrypt_active())
return 0;

if (!PAGE_ALIGNED(addr))
return -EINVAL;

2 changes: 1 addition & 1 deletion arch/s390/mm/init.c
@@ -187,9 +187,9 @@ static void pv_init(void)
return;

/* make sure bounce buffers are shared */
swiotlb_force = SWIOTLB_FORCE;
swiotlb_init(1);
swiotlb_update_mem_attributes();
swiotlb_force = SWIOTLB_FORCE;
}

void __init mem_init(void)
4 changes: 4 additions & 0 deletions drivers/base/core.c
@@ -27,6 +27,7 @@
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/swiotlb.h>
#include <linux/sysfs.h>
#include <linux/dma-map-ops.h> /* for dma_default_coherent */

@@ -2851,6 +2852,9 @@ void device_initialize(struct device *dev)
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
dev->dma_coherent = dma_default_coherent;
#endif
#ifdef CONFIG_SWIOTLB
dev->dma_io_tlb_mem = &io_tlb_default_mem;
#endif
}
EXPORT_SYMBOL_GPL(device_initialize);

2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -42,7 +42,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)

max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
if (is_swiotlb_active()) {
if (is_swiotlb_active(obj->base.dev->dev)) {
unsigned int max_segment;

max_segment = swiotlb_max_segment();
2 changes: 1 addition & 1 deletion drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -276,7 +276,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
}

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
need_swiotlb = is_swiotlb_active();
need_swiotlb = is_swiotlb_active(dev->dev);
#endif

ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
12 changes: 6 additions & 6 deletions drivers/iommu/dma-iommu.c
@@ -506,7 +506,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,

__iommu_dma_unmap(dev, dma_addr, size);

if (unlikely(is_swiotlb_buffer(phys)))
if (unlikely(is_swiotlb_buffer(dev, phys)))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}

@@ -577,7 +577,7 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
}

iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
return iova;
}
@@ -784,7 +784,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(phys, size, dir);

if (is_swiotlb_buffer(phys))
if (is_swiotlb_buffer(dev, phys))
swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}

@@ -797,7 +797,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
return;

phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
if (is_swiotlb_buffer(phys))
if (is_swiotlb_buffer(dev, phys))
swiotlb_sync_single_for_device(dev, phys, size, dir);

if (!dev_is_dma_coherent(dev))
@@ -818,7 +818,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);

if (is_swiotlb_buffer(sg_phys(sg)))
if (is_swiotlb_buffer(dev, sg_phys(sg)))
swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
sg->length, dir);
}
@@ -835,7 +835,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
return;

for_each_sg(sgl, sg, nelems, i) {
if (is_swiotlb_buffer(sg_phys(sg)))
if (is_swiotlb_buffer(dev, sg_phys(sg)))
swiotlb_sync_single_for_device(dev, sg_phys(sg),
sg->length, dir);

40 changes: 40 additions & 0 deletions drivers/of/device.c
@@ -5,6 +5,7 @@
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
#include <linux/dma-map-ops.h>
#include <linux/init.h>
@@ -52,6 +53,42 @@ int of_device_add(struct platform_device *ofdev)
return device_add(&ofdev->dev);
}

static void
of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
{
struct device_node *node, *of_node = dev->of_node;
int count, i;

if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL))
return;

count = of_property_count_elems_of_size(of_node, "memory-region",
sizeof(u32));
/*
* If dev->of_node doesn't exist or doesn't contain memory-region, try
* the OF node having DMA configuration.
*/
if (count <= 0) {
of_node = np;
count = of_property_count_elems_of_size(
of_node, "memory-region", sizeof(u32));
}

for (i = 0; i < count; i++) {
node = of_parse_phandle(of_node, "memory-region", i);
/*
* There might be multiple memory regions, but only one
* restricted-dma-pool region is allowed.
*/
if (of_device_is_compatible(node, "restricted-dma-pool") &&
of_device_is_available(node))
break;
}

if (i != count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
}

/**
* of_dma_configure_id - Setup DMA configuration
* @dev: Device to apply DMA configuration
@@ -165,6 +202,9 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,

arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);

if (!iommu)
of_dma_set_restricted_buffer(dev, np);

return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
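The hook above only locates a restricted-dma-pool node among the device's memory-region phandles; the code that actually attaches the pool is the reserved_mem device-init callback added in kernel/dma/swiotlb.c by "swiotlb: Add restricted DMA pool initialization", which is not shown in this excerpt. A rough sketch of its shape (init_restricted_pool is a made-up placeholder for the slot-array setup; the real code also decrypts the region and creates debugfs entries):

static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
				    struct device *dev)
{
	struct io_tlb_mem *mem = rmem->priv;

	if (!mem) {
		/* First device attached to this pool: carve the reserved
		 * region into SWIOTLB slots and force bouncing. */
		mem = init_restricted_pool(rmem->base, rmem->size);
		if (!mem)
			return -ENOMEM;
		mem->force_bounce = true;
		rmem->priv = mem;
	}

	/* All devices sharing this reserved-memory node bounce through the
	 * same pool instead of io_tlb_default_mem. */
	dev->dma_io_tlb_mem = mem;
	return 0;
}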
2 changes: 1 addition & 1 deletion drivers/pci/xen-pcifront.c
@@ -699,7 +699,7 @@ static int pcifront_connect_and_init_dma(struct pcifront_device *pdev)

spin_unlock(&pcifront_dev_lock);

if (!err && !is_swiotlb_active()) {
if (!err && !is_swiotlb_active(&pdev->xdev->dev)) {
err = pci_xen_swiotlb_init_late();
if (err)
dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n");
8 changes: 4 additions & 4 deletions drivers/xen/swiotlb-xen.c
@@ -100,7 +100,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
* in our domain. Therefore _only_ check address within our domain.
*/
if (pfn_valid(PFN_DOWN(paddr)))
return is_swiotlb_buffer(paddr);
return is_swiotlb_buffer(dev, paddr);
return 0;
}

@@ -164,7 +164,7 @@ int __ref xen_swiotlb_init(void)
int rc = -ENOMEM;
char *start;

if (io_tlb_default_mem != NULL) {
if (io_tlb_default_mem.nslabs) {
pr_warn("swiotlb buffer already initialized\n");
return -EEXIST;
}
@@ -374,7 +374,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
if (dma_capable(dev, dev_addr, size, true) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
swiotlb_force != SWIOTLB_FORCE)
!is_swiotlb_force_bounce(dev))
goto done;

/*
@@ -547,7 +547,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask;
return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask;
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
4 changes: 4 additions & 0 deletions include/linux/device.h
@@ -424,6 +424,7 @@ struct dev_links_info {
* @dma_pools: Dma pools (if dma'ble device).
* @dma_mem: Internal for coherent mem override.
* @cma_area: Contiguous memory area for dma allocations
* @dma_io_tlb_mem: Pointer to the swiotlb pool used. Not for driver use.
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
* @fwnode: Associated device node supplied by platform firmware.
@@ -533,6 +534,9 @@ struct device {
#ifdef CONFIG_DMA_CMA
struct cma *cma_area; /* contiguous memory area for dma
allocations */
#endif
#ifdef CONFIG_SWIOTLB
struct io_tlb_mem *dma_io_tlb_mem;
#endif
/* arch specific additions */
struct dev_archdata archdata;