iommu/amd: Selective flush on unmap
A recent patch attempted to enable selective page flushes on the AMD IOMMU
but neglected to adapt amd_iommu_iotlb_sync() to use selective flushes.

Adapt amd_iommu_iotlb_sync() to use selective flushes and change
amd_iommu_unmap() to collect the flushes. As a defensive measure, to
avoid potential issues like those the Intel IOMMU driver recently
encountered, flush the page-walk caches by always setting the "pde"
parameter. This can be removed later.
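
For orientation, the core IOMMU layer drives the two callbacks touched
here roughly as follows; this is a simplified sketch of the 5.14-era
iommu_unmap() in drivers/iommu/iommu.c, not the verbatim code. The
driver's ->unmap callback records each unmapped page in the gather
structure, and the final ->iotlb_sync callback flushes only the
gathered range:

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather iotlb_gather;
	size_t ret;

	/* Reset the gathered range: start = ULONG_MAX, end = 0, pgsize = 0. */
	iommu_iotlb_gather_init(&iotlb_gather);

	/* Reaches amd_iommu_unmap(), which now records the unmapped pages. */
	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);

	/* Reaches amd_iommu_iotlb_sync(), which flushes only that range. */
	iommu_iotlb_sync(domain, &iotlb_gather);

	return ret;
}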

Cc: Joerg Roedel <joro@8bytes.org>
Cc: Will Deacon <will@kernel.org>
Cc: Jiajun Cao <caojiajun@vmware.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Lu Baolu <baolu.lu@linux.intel.com>
Cc: iommu@lists.linux-foundation.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Nadav Amit <namit@vmware.com>
Link: https://lore.kernel.org/r/20210723093209.714328-2-namit@vmware.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Nadav Amit authored and Joerg Roedel committed Aug 2, 2021
1 parent ee974d9 commit fc65d0a
 drivers/iommu/amd/iommu.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2060,12 +2060,17 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 {
 	struct protection_domain *domain = to_pdomain(dom);
 	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	size_t r;
 
 	if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
 	    (domain->iop.mode == PAGE_MODE_NONE))
 		return 0;
 
-	return (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+	r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+
+	iommu_iotlb_gather_add_page(dom, gather, iova, page_size);
+
+	return r;
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -2168,7 +2173,13 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
 				 struct iommu_iotlb_gather *gather)
 {
-	amd_iommu_flush_iotlb_all(domain);
+	struct protection_domain *dom = to_pdomain(domain);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dom->lock, flags);
+	__domain_flush_pages(dom, gather->start, gather->end - gather->start, 1);
+	amd_iommu_domain_flush_complete(dom);
+	spin_unlock_irqrestore(&dom->lock, flags);
 }
 
 static int amd_iommu_def_domain_type(struct device *dev)
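
For context, the iommu_iotlb_gather_add_page() call added in the first
hunk is the generic helper from include/linux/iommu.h. Simplified to its
5.14-era behavior (an illustrative sketch, details elided), it widens the
gathered [start, end) range to cover each unmapped page and only syncs
early when a page cannot be merged:

static inline void
iommu_iotlb_gather_add_page(struct iommu_domain *domain,
			    struct iommu_iotlb_gather *gather,
			    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size;

	/*
	 * A page that is disjoint from the gathered range, or mapped at
	 * a different granularity, cannot be merged: flush what has been
	 * gathered so far and start a new range.
	 */
	if (gather->pgsize != size ||
	    end < gather->start || start > gather->end) {
		if (gather->pgsize)
			iommu_iotlb_sync(domain, gather);
		gather->pgsize = size;
	}

	/* Otherwise, just widen the range to cover the new page. */
	if (gather->end < end)
		gather->end = end;
	if (gather->start > start)
		gather->start = start;
}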
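The net effect: unmapping a run of pages now triggers a single ranged
flush in amd_iommu_iotlb_sync() instead of the previous full-domain
flush. For example, with hypothetical addresses, unmapping two adjacent
4 KiB pages at 0x100000 and 0x101000 gathers them into start = 0x100000,
end = 0x102000, and the sync flushes just those 0x2000 bytes
(gather->end - gather->start). Passing 1 for the "pde" parameter
additionally asks __domain_flush_pages() to invalidate the page-walk
caches, the defensive measure described in the commit message.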
