VT-d: cleanup iommu_flush_iotlb_psi and flush_unmaps
Make iommu_flush_iotlb_psi() and flush_unmaps() more readable.

Signed-off-by: Yu Zhao <yu.zhao@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Yu Zhao authored and David Woodhouse committed May 18, 2009
1 parent 6ba6c3a commit 9dd2fe8
Showing 1 changed file with 17 additions and 21 deletions.
38 changes: 17 additions & 21 deletions drivers/pci/intel-iommu.c
@@ -948,28 +948,23 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
 				  u64 addr, unsigned int pages)
 {
-	unsigned int mask;
+	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
 
 	BUG_ON(addr & (~VTD_PAGE_MASK));
 	BUG_ON(pages == 0);
 
-	/* Fallback to domain selective flush if no PSI support */
-	if (!cap_pgsel_inv(iommu->cap))
-		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
-						DMA_TLB_DSI_FLUSH);
-
 	/*
+	 * Fallback to domain selective flush if no PSI support or the size is
+	 * too big.
 	 * PSI requires page size to be 2 ^ x, and the base address is naturally
 	 * aligned to the size
 	 */
-	mask = ilog2(__roundup_pow_of_two(pages));
-	/* Fallback to domain selective flush if size is too big */
-	if (mask > cap_max_amask_val(iommu->cap))
-		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
+	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+		iommu->flush.flush_iotlb(iommu, did, 0, 0,
 						DMA_TLB_DSI_FLUSH);
-
-	return iommu->flush.flush_iotlb(iommu, did, addr, mask,
-					DMA_TLB_PSI_FLUSH);
+	else
+		iommu->flush.flush_iotlb(iommu, did, addr, mask,
+						DMA_TLB_PSI_FLUSH);
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
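
For context on the hunk above: PSI (page-selective invalidation) flushes a naturally aligned block of 2^mask pages, so the requested page count is rounded up to a power of two and mask = ilog2(__roundup_pow_of_two(pages)); if that mask exceeds cap_max_amask_val(), the code falls back to a domain-selective flush instead. Below is a minimal standalone sketch of the mask calculation, using plain-C stand-ins for the kernel's ilog2() and __roundup_pow_of_two() helpers; it is illustration only, not intel-iommu code.

/* Standalone illustration -- plain-C stand-ins for the kernel's
 * ilog2() and __roundup_pow_of_two(); not the intel-iommu implementation. */
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int ilog2(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	unsigned int pages[] = { 1, 2, 3, 8, 9 };
	unsigned int i;

	for (i = 0; i < sizeof(pages) / sizeof(pages[0]); i++) {
		unsigned int mask = ilog2(roundup_pow_of_two(pages[i]));

		/* PSI invalidates a naturally aligned block of 2^mask pages */
		printf("pages=%u -> mask=%u (flushes %u pages)\n",
		       pages[i], mask, 1u << mask);
	}
	return 0;
}

For example, a 3-page request rounds up to 4 pages, giving mask = 2.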
@@ -2260,15 +2255,16 @@ static void flush_unmaps(void)
 		if (!iommu)
 			continue;
 
-		if (deferred_flush[i].next) {
-			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
-						 DMA_TLB_GLOBAL_FLUSH);
-			for (j = 0; j < deferred_flush[i].next; j++) {
-				__free_iova(&deferred_flush[i].domain[j]->iovad,
-						deferred_flush[i].iova[j]);
-			}
-			deferred_flush[i].next = 0;
+		if (!deferred_flush[i].next)
+			continue;
+
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+					 DMA_TLB_GLOBAL_FLUSH, 0);
+		for (j = 0; j < deferred_flush[i].next; j++) {
+			__free_iova(&deferred_flush[i].domain[j]->iovad,
+					deferred_flush[i].iova[j]);
 		}
+		deferred_flush[i].next = 0;
 	}
 
 	list_size = 0;
