iommu/vt-d: Cleanup intel_context_flush_present()
intel_context_flush_present() is called only in places where either
scalable mode is disabled, or scalable mode is enabled but all PASID
table entries of the device are known to be non-present. In these
cases, the flush_domains path within the helper never executes. Remove
this dead code and rename the helper to intel_context_flush_no_pasid()
to match its remaining use.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Tested-by: Zhangfei Gao <zhangfei.gao@linaro.org>
Link: https://lore.kernel.org/r/20250228092631.3425464-7-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Lu Baolu authored and Joerg Roedel committed Mar 10, 2025
1 parent 87caaba commit 4c293ad
Showing 3 changed files with 10 additions and 38 deletions.
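
At a glance, the change removes the affect_domains/flush_domains parameter together with the scalable-mode flush loop it guarded, and renames the helper accordingly. A condensed before/after view of the prototype, copied from the iommu.h hunk below and shown here only for orientation:

    /* Before this commit */
    void intel_context_flush_present(struct device_domain_info *info,
                                     struct context_entry *context,
                                     u16 did, bool affect_domains);

    /* After this commit */
    void intel_context_flush_no_pasid(struct device_domain_info *info,
                                      struct context_entry *context, u16 did);

Since the helper is only reached when scalable mode is off or when all PASID table entries of the device are non-present, dropping the parameter and the loop changes no behavior.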
drivers/iommu/intel/iommu.c (2 changes: 1 addition & 1 deletion)
@@ -1783,7 +1783,7 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
         context_clear_entry(context);
         __iommu_flush_cache(iommu, context, sizeof(*context));
         spin_unlock(&iommu->lock);
-        intel_context_flush_present(info, context, did, true);
+        intel_context_flush_no_pasid(info, context, did);
 }
 
 int __domain_setup_first_level(struct intel_iommu *iommu,
drivers/iommu/intel/iommu.h (5 changes: 2 additions & 3 deletions)
@@ -1286,9 +1286,8 @@ void cache_tag_flush_all(struct dmar_domain *domain);
 void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
                               unsigned long end);
 
-void intel_context_flush_present(struct device_domain_info *info,
-                                 struct context_entry *context,
-                                 u16 did, bool affect_domains);
+void intel_context_flush_no_pasid(struct device_domain_info *info,
+                                  struct context_entry *context, u16 did);
 
 int intel_iommu_enable_prq(struct intel_iommu *iommu);
 int intel_iommu_finish_prq(struct intel_iommu *iommu);
drivers/iommu/intel/pasid.c (41 changes: 7 additions & 34 deletions)
@@ -932,7 +932,7 @@ static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
         context_clear_entry(context);
         __iommu_flush_cache(iommu, context, sizeof(*context));
         spin_unlock(&iommu->lock);
-        intel_context_flush_present(info, context, did, false);
+        intel_context_flush_no_pasid(info, context, did);
 }
 
 static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
@@ -1119,17 +1119,15 @@ static void __context_flush_dev_iotlb(struct device_domain_info *info)
 
 /*
  * Cache invalidations after change in a context table entry that was present
- * according to the Spec 6.5.3.3 (Guidance to Software for Invalidations). If
- * IOMMU is in scalable mode and all PASID table entries of the device were
- * non-present, set flush_domains to false. Otherwise, true.
+ * according to the Spec 6.5.3.3 (Guidance to Software for Invalidations).
+ * This helper can only be used when IOMMU is working in the legacy mode or
+ * IOMMU is in scalable mode but all PASID table entries of the device are
+ * non-present.
  */
-void intel_context_flush_present(struct device_domain_info *info,
-                                 struct context_entry *context,
-                                 u16 did, bool flush_domains)
+void intel_context_flush_no_pasid(struct device_domain_info *info,
+                                  struct context_entry *context, u16 did)
 {
         struct intel_iommu *iommu = info->iommu;
-        struct pasid_entry *pte;
-        int i;
 
         /*
          * Device-selective context-cache invalidation. The Domain-ID field
@@ -1152,30 +1150,5 @@ void intel_context_flush_present(struct device_domain_info *info,
                 return;
         }
 
-        /*
-         * For scalable mode:
-         * - Domain-selective PASID-cache invalidation to affected domains
-         * - Domain-selective IOTLB invalidation to affected domains
-         * - Global Device-TLB invalidation to affected functions
-         */
-        if (flush_domains) {
-                /*
-                 * If the IOMMU is running in scalable mode and there might
-                 * be potential PASID translations, the caller should hold
-                 * the lock to ensure that context changes and cache flushes
-                 * are atomic.
-                 */
-                assert_spin_locked(&iommu->lock);
-                for (i = 0; i < info->pasid_table->max_pasid; i++) {
-                        pte = intel_pasid_get_entry(info->dev, i);
-                        if (!pte || !pasid_pte_is_present(pte))
-                                continue;
-
-                        did = pasid_get_domain_id(pte);
-                        qi_flush_pasid_cache(iommu, did, QI_PC_ALL_PASIDS, 0);
-                        iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
-                }
-        }
-
         __context_flush_dev_iotlb(info);
 }
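
The two call sites touched by this commit follow the same pattern: clear the context entry while holding iommu->lock, flush the cache line for the entry, drop the lock, and then invalidate the caches with the renamed helper. A minimal sketch of that pattern, condensed from domain_context_clear_one() and device_pasid_table_teardown() above; the lookup of context and did is elided and only indicated by a comment, and this is not a literal copy of either function:

    spin_lock(&iommu->lock);
    /* ... find the context entry and its domain ID (did) for this device ... */
    context_clear_entry(context);
    __iommu_flush_cache(iommu, context, sizeof(*context));
    spin_unlock(&iommu->lock);

    /* Cache invalidation runs outside the lock; no PASID entries are present. */
    intel_context_flush_no_pasid(info, context, did);

The helper itself keeps the device-selective context-cache invalidation and the final __context_flush_dev_iotlb() call, as seen in the surviving lines of the pasid.c hunks above.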
