iommu/vt-d: Factor out helpers from domain_context_mapping_one()
Extract common code from domain_context_mapping_one() into new helpers,
making them reusable by other functions such as the upcoming identity
domain implementation. No intentional functional changes.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Link: https://lore.kernel.org/r/20240809055431.36513-6-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
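
The motivation is reuse: a follow-up patch in this series adds a static
identity (pass-through) domain whose context-setup path needs the same
kdump tear-down and present-entry flush steps. Below is a minimal sketch of
such a caller, assuming only the two helpers introduced by this patch; the
function name, the FLPT_DEFAULT_DID choice, and the exact entry-programming
sequence are illustrative, not the final upstream implementation.

/* Hypothetical caller reusing the new helpers (sketch only). */
static int identity_context_map_sketch(struct intel_iommu *iommu,
				       u8 bus, u8 devfn)
{
	struct context_entry *context;

	spin_lock(&iommu->lock);
	context = iommu_context_addr(iommu, bus, devfn, 1);
	if (!context) {
		spin_unlock(&iommu->lock);
		return -ENOMEM;
	}

	/* Flush any stale state left by a kdump-copied table. */
	copied_context_tear_down(iommu, context, bus, devfn);
	context_clear_entry(context);

	/* Program a pass-through entry; the domain id is illustrative. */
	context_set_domain_id(context, FLPT_DEFAULT_DID);
	context_set_address_width(context, iommu->msagaw);
	context_set_translation_type(context, CONTEXT_TT_PASS_THROUGH);
	context_set_fault_enable(context);
	context_set_present(context);
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(context, sizeof(*context));

	/* Non-present -> present transition: invalidate as needed. */
	context_present_cache_flush(iommu, FLPT_DEFAULT_DID, bus, devfn);
	spin_unlock(&iommu->lock);

	return 0;
}

Note that both helpers run under iommu->lock here, matching the
assert_spin_locked() contract in copied_context_tear_down() and the way
the existing domain_context_mapping_one() caller holds the lock across
the flush.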
Lu Baolu authored and Joerg Roedel committed Sep 2, 2024
commit c719198 (parent 487df68)

1 changed file: drivers/iommu/intel/iommu.c (58 additions, 41 deletions)
@@ -1597,6 +1597,61 @@ static void domain_exit(struct dmar_domain *domain)
 	kfree(domain);
 }
 
+/*
+ * For kdump cases, old valid entries may be cached due to the
+ * in-flight DMA and copied pgtable, but there is no unmapping
+ * behaviour for them, thus we need an explicit cache flush for
+ * the newly-mapped device. For kdump, at this point, the device
+ * is supposed to finish reset at its driver probe stage, so no
+ * in-flight DMA will exist, and we don't need to worry anymore
+ * hereafter.
+ */
+static void copied_context_tear_down(struct intel_iommu *iommu,
+				     struct context_entry *context,
+				     u8 bus, u8 devfn)
+{
+	u16 did_old;
+
+	if (!context_copied(iommu, bus, devfn))
+		return;
+
+	assert_spin_locked(&iommu->lock);
+
+	did_old = context_domain_id(context);
+	context_clear_entry(context);
+
+	if (did_old < cap_ndoms(iommu->cap)) {
+		iommu->flush.flush_context(iommu, did_old,
+					   (((u16)bus) << 8) | devfn,
+					   DMA_CCMD_MASK_NOBIT,
+					   DMA_CCMD_DEVICE_INVL);
+		iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
+					 DMA_TLB_DSI_FLUSH);
+	}
+
+	clear_context_copied(iommu, bus, devfn);
+}
+
+/*
+ * It's a non-present to present mapping. If hardware doesn't cache
+ * non-present entries we only need to flush the write-buffer. If it
+ * _does_ cache non-present entries, then it does so in the special
+ * domain #0, which we have to flush:
+ */
+static void context_present_cache_flush(struct intel_iommu *iommu, u16 did,
+					u8 bus, u8 devfn)
+{
+	if (cap_caching_mode(iommu->cap)) {
+		iommu->flush.flush_context(iommu, 0,
+					   (((u16)bus) << 8) | devfn,
+					   DMA_CCMD_MASK_NOBIT,
+					   DMA_CCMD_DEVICE_INVL);
+		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+	} else {
+		iommu_flush_write_buffer(iommu);
+	}
+}
+
 static int domain_context_mapping_one(struct dmar_domain *domain,
 				      struct intel_iommu *iommu,
 				      u8 bus, u8 devfn)
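
Both helpers pass (((u16)bus) << 8) | devfn as the source-id of the
context-cache invalidation. That is the standard PCI requester-ID layout:
bus number in bits 15:8, devfn (device << 3 | function) in bits 7:0. A
standalone sketch of the encoding with a worked example; the source_id()
helper name is mine, not from the patch:

#include <stdint.h>

/* Source-id layout used by the flush_context() calls above:
 * PCI requester ID = bus[15:8] | devfn[7:0]. */
static inline uint16_t source_id(uint8_t bus, uint8_t devfn)
{
	return ((uint16_t)bus << 8) | devfn;
}

/* Example: device 3a:1f.3 -> devfn = (0x1f << 3) | 0x3 = 0xfb,
 * so source_id(0x3a, 0xfb) = 0x3afb. */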
@@ -1625,31 +1680,9 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	if (context_present(context) && !context_copied(iommu, bus, devfn))
 		goto out_unlock;
 
-	/*
-	 * For kdump cases, old valid entries may be cached due to the
-	 * in-flight DMA and copied pgtable, but there is no unmapping
-	 * behaviour for them, thus we need an explicit cache flush for
-	 * the newly-mapped device. For kdump, at this point, the device
-	 * is supposed to finish reset at its driver probe stage, so no
-	 * in-flight DMA will exist, and we don't need to worry anymore
-	 * hereafter.
-	 */
-	if (context_copied(iommu, bus, devfn)) {
-		u16 did_old = context_domain_id(context);
-
-		if (did_old < cap_ndoms(iommu->cap)) {
-			iommu->flush.flush_context(iommu, did_old,
-						   (((u16)bus) << 8) | devfn,
-						   DMA_CCMD_MASK_NOBIT,
-						   DMA_CCMD_DEVICE_INVL);
-			iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
-						 DMA_TLB_DSI_FLUSH);
-		}
-
-		clear_context_copied(iommu, bus, devfn);
-	}
-
+	copied_context_tear_down(iommu, context, bus, devfn);
 	context_clear_entry(context);
+
 	context_set_domain_id(context, did);
 
 	if (translation != CONTEXT_TT_PASS_THROUGH) {
@@ -1685,23 +1718,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	context_set_present(context);
 	if (!ecap_coherent(iommu->ecap))
 		clflush_cache_range(context, sizeof(*context));
-
-	/*
-	 * It's a non-present to present mapping. If hardware doesn't cache
-	 * non-present entry we only need to flush the write-buffer. If the
-	 * _does_ cache non-present entries, then it does so in the special
-	 * domain #0, which we have to flush:
-	 */
-	if (cap_caching_mode(iommu->cap)) {
-		iommu->flush.flush_context(iommu, 0,
-					   (((u16)bus) << 8) | devfn,
-					   DMA_CCMD_MASK_NOBIT,
-					   DMA_CCMD_DEVICE_INVL);
-		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
-	} else {
-		iommu_flush_write_buffer(iommu);
-	}
-
+	context_present_cache_flush(iommu, did, bus, devfn);
 	ret = 0;
 
 out_unlock:
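
One guard in copied_context_tear_down() is worth a worked example: the
flush is skipped when did_old falls outside the domain-id space the
hardware advertises, since a copied kdump entry may carry garbage.
cap_ndoms() derives that space from the ND field of the VT-d capability
register; below is a sketch of the arithmetic, assuming the
ndoms = 2^(4 + 2 * ND) encoding from the VT-d specification
(ndoms_from_nd() is a hypothetical stand-in for the real macro):

/* Domain ids supported for a given ND capability field value:
 * ndoms = 1 << (4 + 2 * ND). ND = 2 gives 256 domain ids, so a
 * did_old of, say, 300 read from a copied kdump entry would fail
 * the did_old < cap_ndoms() check and simply not be flushed. */
static inline unsigned long ndoms_from_nd(unsigned int nd)
{
	return 1UL << (4 + 2 * nd);
}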
