Merge branch 'for-next/iommu/vt-d' into for-next/iommu/core
Intel VT-d updates for 5.11. The main thing here is converting the code
over to the iommu-dma API, which required some improvements to the core
code to preserve existing functionality; a brief usage sketch of the new
per-CPU IOVA-cache helper follows the patch list below.

* for-next/iommu/vt-d:
  iommu/vt-d: Avoid GFP_ATOMIC where it is not needed
  iommu/vt-d: Remove set but not used variable
  iommu/vt-d: Cleanup after converting to dma-iommu ops
  iommu/vt-d: Convert intel iommu driver to the iommu ops
  iommu/vt-d: Update domain geometry in iommu_ops.at(de)tach_dev
  iommu: Add quirk for Intel graphic devices in map_sg
  iommu: Allow the dma-iommu api to use bounce buffers
  iommu: Add iommu_dma_free_cpu_cached_iovas()
  iommu: Handle freelists when using deferred flushing in iommu drivers
  iommu/vt-d: include conditionally on CONFIG_INTEL_IOMMU_SVM
Will Deacon committed Dec 8, 2020
2 parents c5257e3 + 33e0715 commit 113eb4c
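
With IOVA handling moved into the dma-iommu layer, the series adds
iommu_dma_free_cpu_cached_iovas() so a driver can still drain a CPU's
per-CPU IOVA rcaches when that CPU goes offline. The sketch below is
illustrative only: the hotplug state, the example_domain bookkeeping and
the function names are assumptions for this example, not code from the
series.

#include <linux/cpuhotplug.h>
#include <linux/dma-iommu.h>
#include <linux/iommu.h>
#include <linux/list.h>

/* Hypothetical per-driver bookkeeping of DMA-API domains. */
struct example_domain {
	struct iommu_domain *domain;	/* domain with an IOVA cookie */
	struct list_head node;
};

static LIST_HEAD(example_domains);

/* Teardown callback: return the dead CPU's cached IOVAs to the shared pool. */
static int example_iommu_cpu_dead(unsigned int cpu)
{
	struct example_domain *edom;

	/* Locking of the domain list is omitted for brevity. */
	list_for_each_entry(edom, &example_domains, node)
		iommu_dma_free_cpu_cached_iovas(cpu, edom->domain);

	return 0;
}

static int __init example_iommu_hotplug_init(void)
{
	int ret;

	/* Prepare-section teardown runs on a control CPU after the CPU is dead. */
	ret = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
					"iommu/example:dead",
					NULL, example_iommu_cpu_dead);
	return ret < 0 ? ret : 0;
}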
Showing 9 changed files with 351 additions and 834 deletions.
5 changes: 0 additions & 5 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -1883,11 +1883,6 @@
Note that using this option lowers the security
provided by tboot because it makes the system
vulnerable to DMA attacks.
nobounce [Default off]
Disable bounce buffer for untrusted devices such as
the Thunderbolt devices. This will treat the untrusted
devices as the trusted ones, hence might expose security
risks of DMA attacks.

intel_idle.max_cstate= [KNL,HW,ACPI,X86]
0 disables intel_idle and fall back on acpi_idle.
227 changes: 206 additions & 21 deletions drivers/iommu/dma-iommu.c
@@ -20,9 +20,11 @@
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/swiotlb.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/dma-direct.h>

struct iommu_dma_msi_page {
struct list_head list;
@@ -49,6 +51,27 @@ struct iommu_dma_cookie {
struct iommu_domain *fq_domain;
};

void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
struct iommu_domain *domain)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;

free_cpu_cached_iovas(cpu, iovad);
}

static void iommu_dma_entry_dtor(unsigned long data)
{
struct page *freelist = (struct page *)data;

while (freelist) {
unsigned long p = (unsigned long)page_address(freelist);

freelist = freelist->freelist;
free_page(p);
}
}

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
@@ -343,7 +366,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
NULL))
iommu_dma_entry_dtor))
pr_warn("iova flush queue initialization failed\n");
else
cookie->fq_domain = domain;
@@ -440,7 +463,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
dma_addr_t iova, size_t size)
dma_addr_t iova, size_t size, struct page *freelist)
{
struct iova_domain *iovad = &cookie->iovad;

@@ -449,7 +472,8 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
cookie->msi_iova -= size;
else if (cookie->fq_domain) /* non-strict mode */
queue_iova(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad), 0);
size >> iova_shift(iovad),
(unsigned long)freelist);
else
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
@@ -474,7 +498,32 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,

if (!cookie->fq_domain)
iommu_iotlb_sync(domain, &iotlb_gather);
iommu_dma_free_iova(cookie, dma_addr, size);
iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
}

static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
phys_addr_t phys;

phys = iommu_iova_to_phys(domain, dma_addr);
if (WARN_ON(!phys))
return;

__iommu_dma_unmap(dev, dma_addr, size);

if (unlikely(is_swiotlb_buffer(phys)))
swiotlb_tbl_unmap_single(dev, phys, size,
iova_align(iovad, size), dir, attrs);
}

static bool dev_is_untrusted(struct device *dev)
{
return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
@@ -496,12 +545,60 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
return DMA_MAPPING_ERROR;

if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
iommu_dma_free_iova(cookie, iova, size);
iommu_dma_free_iova(cookie, iova, size, NULL);
return DMA_MAPPING_ERROR;
}
return iova + iova_off;
}

static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
size_t org_size, dma_addr_t dma_mask, bool coherent,
enum dma_data_direction dir, unsigned long attrs)
{
int prot = dma_info_to_prot(dir, coherent, attrs);
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
size_t aligned_size = org_size;
void *padding_start;
size_t padding_size;
dma_addr_t iova;

/*
* If both the physical buffer start address and size are
* page aligned, we don't need to use a bounce page.
*/
if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
iova_offset(iovad, phys | org_size)) {
aligned_size = iova_align(iovad, org_size);
phys = swiotlb_tbl_map_single(dev, phys, org_size,
aligned_size, dir, attrs);

if (phys == DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;

/* Cleanup the padding area. */
padding_start = phys_to_virt(phys);
padding_size = aligned_size;

if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
(dir == DMA_TO_DEVICE ||
dir == DMA_BIDIRECTIONAL)) {
padding_start += org_size;
padding_size -= org_size;
}

memset(padding_start, 0, padding_size);
}

iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys))
swiotlb_tbl_unmap_single(dev, phys, org_size,
aligned_size, dir, attrs);

return iova;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
while (count--)
@@ -649,7 +746,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
out_free_sg:
sg_free_table(&sgt);
out_free_iova:
iommu_dma_free_iova(cookie, iova, size);
iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
__iommu_dma_free_pages(pages, count);
return NULL;
@@ -675,23 +772,31 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
{
phys_addr_t phys;

if (dev_is_dma_coherent(dev))
if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
return;

phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
arch_sync_dma_for_cpu(phys, size, dir);
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(phys, size, dir);

if (is_swiotlb_buffer(phys))
swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
phys_addr_t phys;

if (dev_is_dma_coherent(dev))
if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
return;

phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
arch_sync_dma_for_device(phys, size, dir);
if (is_swiotlb_buffer(phys))
swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);

if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
@@ -701,11 +806,17 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg;
int i;

if (dev_is_dma_coherent(dev))
if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
return;

for_each_sg(sgl, sg, nelems, i)
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
for_each_sg(sgl, sg, nelems, i) {
if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);

if (is_swiotlb_buffer(sg_phys(sg)))
swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
dir, SYNC_FOR_CPU);
}
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -715,11 +826,17 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg;
int i;

if (dev_is_dma_coherent(dev))
if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
return;

for_each_sg(sgl, sg, nelems, i)
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
for_each_sg(sgl, sg, nelems, i) {
if (is_swiotlb_buffer(sg_phys(sg)))
swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
dir, SYNC_FOR_DEVICE);

if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -728,10 +845,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
{
phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
int prot = dma_info_to_prot(dir, coherent, attrs);
dma_addr_t dma_handle;

dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
coherent, dir, attrs);
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
dma_handle != DMA_MAPPING_ERROR)
arch_sync_dma_for_device(phys, size, dir);
@@ -743,7 +860,7 @@ static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
{
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
__iommu_dma_unmap(dev, dma_handle, size);
__iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
}

/*
@@ -761,6 +878,33 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
int i, count = 0;

/*
* The Intel graphics driver has long assumed that the returned
* sg list is not combined (segments are not merged). This blocks the
* effort of converting the Intel IOMMU driver to the dma-iommu API.
* Add this quirk to keep the device driver working; it should be
* removed once the i915 driver is fixed.
*/
if (IS_ENABLED(CONFIG_DRM_I915) && dev_is_pci(dev) &&
to_pci_dev(dev)->vendor == PCI_VENDOR_ID_INTEL &&
(to_pci_dev(dev)->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
for_each_sg(sg, s, nents, i) {
unsigned int s_iova_off = sg_dma_address(s);
unsigned int s_length = sg_dma_len(s);
unsigned int s_iova_len = s->length;

s->offset += s_iova_off;
s->length = s_length;
sg_dma_address(s) = dma_addr + s_iova_off;
sg_dma_len(s) = s_length;
dma_addr += s_iova_len;

pr_info_once("sg combining disabled due to i915 driver\n");
}

return nents;
}

for_each_sg(sg, s, nents, i) {
/* Restore this segment's original unaligned fields first */
unsigned int s_iova_off = sg_dma_address(s);
@@ -821,6 +965,39 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
}
}

static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s;
int i;

for_each_sg(sg, s, nents, i)
__iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
sg_dma_len(s), dir, attrs);
}

static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s;
int i;

for_each_sg(sg, s, nents, i) {
sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
s->length, dma_get_mask(dev),
dev_is_dma_coherent(dev), dir, attrs);
if (sg_dma_address(s) == DMA_MAPPING_ERROR)
goto out_unmap;
sg_dma_len(s) = s->length;
}

return nents;

out_unmap:
iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
return 0;
}

/*
* The DMA API client is passing in a scatterlist which could describe
* any old buffer layout, but the IOMMU API requires everything to be
@@ -847,6 +1024,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

if (dev_is_untrusted(dev))
return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);

/*
* Work out how much IOVA space we need, and align the segments to
* IOVA granules for the IOMMU driver to handle. With some clever
@@ -900,7 +1080,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
iommu_dma_free_iova(cookie, iova, iova_len);
iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
__invalidate_sg(sg, nents);
return 0;
@@ -916,6 +1096,11 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

if (dev_is_untrusted(dev)) {
iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
return;
}

/*
* The scatterlist segments are mapped into a single
* contiguous IOVA allocation, so this is incredibly easy.
@@ -1228,7 +1413,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
return msi_page;

out_free_iova:
iommu_dma_free_iova(cookie, iova, size);
iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
kfree(msi_page);
return NULL;
(Diffs for the remaining 7 changed files are not shown.)
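
From a device driver's point of view the bounce-buffer support added above
is transparent: the streaming DMA API is unchanged, and
iommu_dma_map_page()/iommu_dma_unmap_page() decide internally whether to
route an untrusted device's unaligned buffer through swiotlb. A minimal
sketch of that unchanged consumer side (example_dma_to_device, dev and buf
are placeholders, not names from this series):

#include <linux/dma-mapping.h>

static int example_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/*
	 * Nothing new for the caller: if "dev" is an untrusted PCI device
	 * and "buf" is not granule-aligned, the dma-iommu code above
	 * bounces the transfer through swiotlb behind this call.
	 */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... let the device DMA from "handle" ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}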
