x86/amd-iommu: Make iommu_flush_pages aware of multiple IOMMUs
This patch extends the iommu_flush_pages function to flush
the TLB entries on all IOMMUs the domain has devices on.
This gives up the former assumption that dma_ops domains
are bound to only one IOMMU in the system. For dma_ops
domains this is still true, but not for IOMMU-API managed
domains, and giving the assumption up for dma_ops domains
too allows the code to be simplified. The patch further
splits the main logic out into a generic function which
can be used by iommu_flush_tlb too.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Joerg Roedel committed Nov 27, 2009
1 parent 0518a3a commit 6de8ad9
Showing 1 changed file with 24 additions and 7 deletions.
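
The commit message notes that the new generic helper can also back
iommu_flush_tlb. That follow-up is not part of this diff; the sketch
below shows what it could look like, assuming the existing
CMD_INV_IOMMU_ALL_PAGES_ADDRESS constant. Any size covering more than
one page already sends the helper down its flush-all path, so a
whole-domain flush needs no extra logic:

/*
 * Minimal sketch (not this commit's code): flush the whole IO/TLB of
 * a domain by reusing __iommu_flush_pages(). A size spanning more
 * than one page makes the helper substitute the flush-all address and
 * set the size bit, so this invalidates everything on every IOMMU the
 * domain has devices on.
 */
static void iommu_flush_tlb(struct protection_domain *domain)
{
	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}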
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -447,10 +447,10 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
  * It invalidates a single PTE if the range to flush is within a single
  * page. Otherwise it flushes the whole TLB of the IOMMU.
  */
-static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
-			     u64 address, size_t size)
+static void __iommu_flush_pages(struct protection_domain *domain,
+				u64 address, size_t size, int pde)
 {
-	int s = 0;
+	int s = 0, i;
 	unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);
 
 	address &= PAGE_MASK;
@@ -464,9 +464,26 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
 		s = 1;
 	}
 
-	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);
-
-	return 0;
+	for (i = 0; i < amd_iommus_present; ++i) {
+		if (!domain->dev_iommu[i])
+			continue;
+
+		/*
+		 * Devices of this domain are behind this IOMMU
+		 * We need a TLB flush
+		 */
+		iommu_queue_inv_iommu_pages(amd_iommus[i], address,
+					    domain->id, pde, s);
+	}
+
+	return;
 }
 
+static void iommu_flush_pages(struct protection_domain *domain,
+			      u64 address, size_t size)
+{
+	__iommu_flush_pages(domain, address, size, 0);
+}
+
 /* Flush the whole IO/TLB for a given protection domain */
@@ -1683,7 +1700,7 @@ static dma_addr_t __map_single(struct device *dev,
 		iommu_flush_tlb(iommu, dma_dom->domain.id);
 		dma_dom->need_flush = false;
 	} else if (unlikely(iommu_has_npcache(iommu)))
-		iommu_flush_pages(iommu, dma_dom->domain.id, address, size);
+		iommu_flush_pages(&dma_dom->domain, address, size);
 
 out:
 	return address;
@@ -1731,7 +1748,7 @@ static void __unmap_single(struct amd_iommu *iommu,
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
 	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
+		iommu_flush_pages(&dma_dom->domain, dma_addr, size);
 		dma_dom->need_flush = false;
 	}
 }
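
The per-IOMMU loop in __iommu_flush_pages relies on
domain->dev_iommu[] holding, for each IOMMU in the system, the number
of the domain's devices behind it. That bookkeeping is introduced
elsewhere in this series; a minimal sketch of the idea, assuming
struct amd_iommu carries its index into the amd_iommus[] array:

/*
 * Sketch only: the attach/detach paths keep a per-IOMMU device count
 * so that flushes can skip IOMMUs with no devices of this domain.
 * Names and placement here are illustrative, not this commit's code.
 */
static void domain_ref_iommu(struct protection_domain *domain,
			     struct amd_iommu *iommu)
{
	domain->dev_iommu[iommu->index] += 1; /* one more device behind it */
}

static void domain_unref_iommu(struct protection_domain *domain,
			       struct amd_iommu *iommu)
{
	domain->dev_iommu[iommu->index] -= 1; /* at zero, flushes skip it */
}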