Skip to content

Commit

Permalink
iommu/amd: Lock DTE before updating the entry with WRITE_ONCE()
Browse files Browse the repository at this point in the history
When updating only a single 64-bit word of a DTE, it is sufficient to take the
DTE lock and store the word with WRITE_ONCE(), since the entry resides in
memory that the hardware reads back.

Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20241118054937.5203-9-suravee.suthikulpanit@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
  • Loading branch information
Suravee Suthikulpanit authored and Joerg Roedel committed Dec 18, 2024
1 parent 66ea3f9 commit 457da57
Show file tree
Hide file tree
Showing 2 changed files with 25 additions and 19 deletions.
1 change: 1 addition & 0 deletions drivers/iommu/amd/amd_iommu.h
Original file line number Diff line number Diff line change
Expand Up @@ -186,3 +186,4 @@ struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
#endif

struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid);
struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid);
43 changes: 24 additions & 19 deletions drivers/iommu/amd/iommu.c
Original file line number Diff line number Diff line change
Expand Up @@ -347,7 +347,7 @@ static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
return dev_data;
}

static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
{
struct iommu_dev_data *dev_data;
struct llist_node *node;
Expand Down Expand Up @@ -2845,12 +2845,12 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
bool enable)
{
struct protection_domain *pdomain = to_pdomain(domain);
struct dev_table_entry *dev_table;
struct dev_table_entry *dte;
struct iommu_dev_data *dev_data;
bool domain_flush = false;
struct amd_iommu *iommu;
unsigned long flags;
u64 pte_root;
u64 new;

spin_lock_irqsave(&pdomain->lock, flags);
if (!(pdomain->dirty_tracking ^ enable)) {
Expand All @@ -2859,16 +2859,15 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
}

list_for_each_entry(dev_data, &pdomain->dev_list, list) {
spin_lock(&dev_data->dte_lock);
iommu = get_amd_iommu_from_dev_data(dev_data);

dev_table = get_dev_table(iommu);
pte_root = dev_table[dev_data->devid].data[0];

pte_root = (enable ? pte_root | DTE_FLAG_HAD :
pte_root & ~DTE_FLAG_HAD);
dte = &get_dev_table(iommu)[dev_data->devid];
new = dte->data[0];
new = (enable ? new | DTE_FLAG_HAD : new & ~DTE_FLAG_HAD);
dte->data[0] = new;
spin_unlock(&dev_data->dte_lock);

/* Flush device DTE */
dev_table[dev_data->devid].data[0] = pte_root;
device_flush_dte(dev_data);
domain_flush = true;
}
Expand Down Expand Up @@ -3135,17 +3134,23 @@ static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
/*
 * Point the interrupt-remapping fields of a Device Table Entry at the
 * given remap table and enable interrupt remapping for @devid.
 *
 * Only the third 64-bit word (data[2]) of the DTE is modified, so it is
 * sufficient to hold the per-device dte_lock and publish the new value
 * with WRITE_ONCE(): the hardware reads the entry back from memory, and
 * READ_ONCE()/WRITE_ONCE() keep the 64-bit access from being torn.
 *
 * NOTE(review): dev_data may be NULL for a devid with no allocated
 * iommu_dev_data; in that case the update proceeds unlocked — presumably
 * safe because no other writer exists for such entries. Confirm against
 * callers.
 */
static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
			      struct irq_remap_table *table)
{
	u64 new;
	struct dev_table_entry *dte = &get_dev_table(iommu)[devid];
	struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);

	if (dev_data)
		spin_lock(&dev_data->dte_lock);

	/* Rebuild data[2]: replace the table address, keep other bits. */
	new = READ_ONCE(dte->data[2]);
	new &= ~DTE_IRQ_PHYS_ADDR_MASK;
	new |= iommu_virt_to_phys(table->table);
	new |= DTE_IRQ_REMAP_INTCTL;
	new |= DTE_INTTABLEN;
	new |= DTE_IRQ_REMAP_ENABLE;
	WRITE_ONCE(dte->data[2], new);

	if (dev_data)
		spin_unlock(&dev_data->dte_lock);
}

static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
Expand Down

0 comments on commit 457da57

Please sign in to comment.