iommu/arm-smmu: use mutex instead of spinlock for locking page tables
When creating IO mappings, we lazily allocate our page tables using the
standard, non-atomic allocator functions. This presents us with a
problem, since our page tables are protected with a spinlock.
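
To make the hazard concrete, here is a minimal sketch (hypothetical
helpers, not code from this driver; get_zeroed_page() stands in for
whichever non-atomic allocation populates the tables): a GFP_KERNEL
allocation may sleep, sleeping under a spinlock is a bug, but sleeping
under a mutex is fine.

/*
 * Illustrative sketch only -- not taken from arm-smmu.c. GFP_KERNEL
 * allocations may sleep, and sleeping inside a spinlock's critical
 * section is illegal (CONFIG_DEBUG_ATOMIC_SLEEP will splat); the same
 * allocation under a mutex is permitted.
 */
#include <linux/gfp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(table_spinlock);	/* hypothetical locks */
static DEFINE_MUTEX(table_mutex);

static unsigned long broken_alloc(void)
{
	unsigned long page;

	spin_lock(&table_spinlock);		/* enter atomic context */
	page = get_zeroed_page(GFP_KERNEL);	/* may sleep: BUG here */
	spin_unlock(&table_spinlock);
	return page;
}

static unsigned long fixed_alloc(void)
{
	unsigned long page;

	mutex_lock(&table_mutex);		/* mutex holders may sleep */
	page = get_zeroed_page(GFP_KERNEL);	/* now legal */
	mutex_unlock(&table_mutex);
	return page;
}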

This patch reworks the smmu_domain lock to use a mutex instead of a
spinlock. iova_to_phys is then reworked so that it only reads the page
tables, and can run in a lockless fashion, leaving the mutex to guard
against concurrent mapping threads.
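
As I read the rework, the walk is safe without the lock because each
level's entry is copied into a local by value before it is tested: a
concurrent mapper (still serialised by the mutex) can only install
entries, so the snapshot the reader takes is either the old or the new
value, never a torn intermediate. Condensed to one level (illustrative
fragment, not a complete function):

/*
 * Shape of the lockless walk in the reworked arm_smmu_iova_to_phys()
 * (condensed, one level shown): snapshot the entry by value, then
 * test only the local copy.
 */
pgd_t *pgdp, pgd;

pgdp = root_cfg->pgd;			/* root of the domain's tables */
if (!pgdp)
	return 0;			/* nothing mapped yet */

pgd = *(pgdp + pgd_index(iova));	/* one-shot read of the entry */
if (pgd_none(pgd))			/* test the stable local copy */
	return 0;

/* the pud, pmd and pte levels repeat the same snapshot-then-test */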

Cc: <stable@vger.kernel.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Author: Will Deacon <will.deacon@arm.com>
Date:   Dec 6, 2013
Parent: dc1ccc4
Commit: a44a979

 drivers/iommu/arm-smmu.c | 62 ++++++++++++++++++++++++++------------------------------------
 1 file changed, 26 insertions(+), 36 deletions(-)

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -392,7 +392,7 @@ struct arm_smmu_domain {
 	struct arm_smmu_cfg		root_cfg;
 	phys_addr_t			output_mask;
 
-	spinlock_t			lock;
+	struct mutex			lock;
 };
 
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -900,7 +900,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
 		goto out_free_domain;
 	smmu_domain->root_cfg.pgd = pgd;
 
-	spin_lock_init(&smmu_domain->lock);
+	mutex_init(&smmu_domain->lock);
 	domain->priv = smmu_domain;
 	return 0;

@@ -1137,7 +1137,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	 * Sanity check the domain. We don't currently support domains
 	 * that cross between different SMMU chains.
 	 */
-	spin_lock(&smmu_domain->lock);
+	mutex_lock(&smmu_domain->lock);
 	if (!smmu_domain->leaf_smmu) {
 		/* Now that we have a master, we can finalise the domain */
 		ret = arm_smmu_init_domain_context(domain, dev);
@@ -1152,7 +1152,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 			 dev_name(device_smmu->dev));
 		goto err_unlock;
 	}
-	spin_unlock(&smmu_domain->lock);
+	mutex_unlock(&smmu_domain->lock);
 
 	/* Looks ok, so add the device to the domain */
 	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1162,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	return arm_smmu_domain_add_master(smmu_domain, master);
 
 err_unlock:
-	spin_unlock(&smmu_domain->lock);
+	mutex_unlock(&smmu_domain->lock);
 	return ret;
 }

@@ -1394,7 +1394,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	if (paddr & ~output_mask)
 		return -ERANGE;
 
-	spin_lock(&smmu_domain->lock);
+	mutex_lock(&smmu_domain->lock);
 	pgd += pgd_index(iova);
 	end = iova + size;
 	do {
@@ -1410,7 +1410,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	} while (pgd++, iova != end);
 
 out_unlock:
-	spin_unlock(&smmu_domain->lock);
+	mutex_unlock(&smmu_domain->lock);
 
 	/* Ensure new page tables are visible to the hardware walker */
 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
@@ -1449,44 +1449,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 					 dma_addr_t iova)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
+	pgd_t *pgdp, pgd;
+	pud_t pud;
+	pmd_t pmd;
+	pte_t pte;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
 	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
-	struct arm_smmu_device *smmu = root_cfg->smmu;
 
-	spin_lock(&smmu_domain->lock);
-	pgd = root_cfg->pgd;
-	if (!pgd)
-		goto err_unlock;
+	pgdp = root_cfg->pgd;
+	if (!pgdp)
+		return 0;
 
-	pgd += pgd_index(iova);
-	if (pgd_none_or_clear_bad(pgd))
-		goto err_unlock;
+	pgd = *(pgdp + pgd_index(iova));
+	if (pgd_none(pgd))
+		return 0;
 
-	pud = pud_offset(pgd, iova);
-	if (pud_none_or_clear_bad(pud))
-		goto err_unlock;
+	pud = *pud_offset(&pgd, iova);
+	if (pud_none(pud))
+		return 0;
 
-	pmd = pmd_offset(pud, iova);
-	if (pmd_none_or_clear_bad(pmd))
-		goto err_unlock;
+	pmd = *pmd_offset(&pud, iova);
+	if (pmd_none(pmd))
+		return 0;
 
-	pte = pmd_page_vaddr(*pmd) + pte_index(iova);
+	pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
 	if (pte_none(pte))
-		goto err_unlock;
-
-	spin_unlock(&smmu_domain->lock);
-	return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
+		return 0;
 
-err_unlock:
-	spin_unlock(&smmu_domain->lock);
-	dev_warn(smmu->dev,
-		 "invalid (corrupt?) page tables detected for iova 0x%llx\n",
-		 (unsigned long long)iova);
-	return -EINVAL;
+	return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
 }
 
 static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
