iommu/arm-smmu: Remove io-pgtable spinlock
With the io-pgtable code now robust against (valid) races, we no longer
need to serialise all operations with a lock. This might make broken
callers who issue concurrent operations on overlapping addresses go even
more wrong than before, but hey, they already had little hope of useful
or deterministic results.

We do however still have to keep a lock around to serialise the ATS1*
translation ops, as parallel iova_to_phys() calls could lead to
unpredictable hardware behaviour otherwise.
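
The constraint comes down to the write/poll/read handshake touching a single shared set of per-context-bank registers. Below is a minimal sketch of the sequence that cb_lock serialises, assuming the ARM_SMMU_CB_ATS1PR/ATSR/PAR offsets and the ATSR_ACTIVE flag from arm-smmu.c, and simplifying the patch's V1/V2 register-width handling into a plain writeq_relaxed(); it is an illustration, not the patch itself:

	/* Needs <linux/io.h>, <linux/iopoll.h>, <linux/spinlock.h>.
	 * Fault decoding of PAR (CB_PAR_F) is omitted for brevity. */
	static u64 ats1_translate(struct arm_smmu_domain *smmu_domain,
				  void __iomem *cb_base, unsigned long va)
	{
		unsigned long flags;
		u32 tmp;
		u64 phys = 0;

		spin_lock_irqsave(&smmu_domain->cb_lock, flags);

		/* Kick off a stage 1 translation of 'va'... */
		writeq_relaxed(va & ~0xfffUL, cb_base + ARM_SMMU_CB_ATS1PR);

		/* ...spin until the hardware clears ATSR.ACTIVE... */
		if (!readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
					       !(tmp & ATSR_ACTIVE), 5, 50))
			/* ...and grab PAR before anyone else can clobber it. */
			phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);

		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);

		return phys;	/* 0 on timeout: caller falls back to a SW walk */
	}

Without the lock, a second caller could write ATS1PR between another CPU's poll and its PAR read, which is exactly the unpredictable behaviour described above.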

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Robin Murphy authored and Will Deacon committed Jun 23, 2017
1 parent 119ff30 commit 523d742
Showing 1 changed file with 14 additions and 31 deletions.
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -433,10 +433,10 @@ enum arm_smmu_domain_stage {
 struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
 	struct io_pgtable_ops		*pgtbl_ops;
-	spinlock_t			pgtbl_lock;
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
 	struct mutex			init_mutex; /* Protects smmu pointer */
+	spinlock_t			cb_lock; /* Serialises ATS1* ops */
 	struct iommu_domain		domain;
 };

@@ -1113,7 +1113,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 	}
 
 	mutex_init(&smmu_domain->init_mutex);
-	spin_lock_init(&smmu_domain->pgtbl_lock);
+	spin_lock_init(&smmu_domain->cb_lock);
 
 	return &smmu_domain->domain;
 }
@@ -1391,35 +1391,23 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 			phys_addr_t paddr, size_t size, int prot)
 {
-	int ret;
-	unsigned long flags;
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
+	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
 
 	if (!ops)
 		return -ENODEV;
 
-	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-	ret = ops->map(ops, iova, paddr, size, prot);
-	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
-	return ret;
+	return ops->map(ops, iova, paddr, size, prot);
 }
 
 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 			     size_t size)
 {
-	size_t ret;
-	unsigned long flags;
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
+	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
 
 	if (!ops)
 		return 0;
 
-	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-	ret = ops->unmap(ops, iova, size);
-	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
-	return ret;
+	return ops->unmap(ops, iova, size);
 }
 
 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
@@ -1433,10 +1421,11 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 	void __iomem *cb_base;
 	u32 tmp;
 	u64 phys;
-	unsigned long va;
+	unsigned long va, flags;
 
 	cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
 
+	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
 	/* ATS1 registers can only be written atomically */
 	va = iova & ~0xfffUL;
 	if (smmu->version == ARM_SMMU_V2)
@@ -1446,13 +1435,15 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 
 	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
 				      !(tmp & ATSR_ACTIVE), 5, 50)) {
+		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 		dev_err(dev,
 			"iova to phys timed out on %pad. Falling back to software table walk.\n",
 			&iova);
 		return ops->iova_to_phys(ops, iova);
 	}
 
 	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
+	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 	if (phys & CB_PAR_F) {
 		dev_err(dev, "translation fault!\n");
 		dev_err(dev, "PAR = 0x%llx\n", phys);
@@ -1465,28 +1456,20 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 					 dma_addr_t iova)
 {
-	phys_addr_t ret;
-	unsigned long flags;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
+	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
 
 	if (domain->type == IOMMU_DOMAIN_IDENTITY)
 		return iova;
 
 	if (!ops)
 		return 0;
 
-	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
 	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
-			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-		ret = arm_smmu_iova_to_phys_hard(domain, iova);
-	} else {
-		ret = ops->iova_to_phys(ops, iova);
-	}
-
-	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+			smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+		return arm_smmu_iova_to_phys_hard(domain, iova);
 
-	return ret;
+	return ops->iova_to_phys(ops, iova);
 }
 
 static bool arm_smmu_capable(enum iommu_cap cap)
