From 636e97b0fb0a063dc89773b13064647f5da632cb Mon Sep 17 00:00:00 2001
From: Andreas Herrmann
Date: Thu, 30 Jan 2014 18:18:08 +0000
Subject: [PATCH 1/6] iommu/arm-smmu: set MAX_MASTER_STREAMIDS to MAX_PHANDLE_ARGS

The DT parsing code that determines stream IDs uses
of_parse_phandle_with_args and thus MAX_MASTER_STREAMIDS is always
bound by MAX_PHANDLE_ARGS.

Signed-off-by: Andreas Herrmann
Signed-off-by: Will Deacon
---
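[ed: for context, a minimal sketch of the DT parsing this bound comes
from. It follows the probe path in arm-smmu.c (the property names
"mmu-masters" and "#stream-id-cells" are from the binding updated later
in this series), but the function is illustrative only and
sketch_count_masters() is a made-up name. of_parse_phandle_with_args()
fills a struct of_phandle_args whose args[] array is sized
MAX_PHANDLE_ARGS, so no master can ever present more stream IDs than
that:]

#include <linux/of.h>

/*
 * Sketch: iterate the "mmu-masters" phandle list of an SMMU node.
 * Each successful call yields one master's stream IDs in
 * masterspec.args[], with masterspec.args_count <= MAX_PHANDLE_ARGS
 * by construction.
 */
static int sketch_count_masters(struct device_node *np)
{
	struct of_phandle_args masterspec;
	int i = 0;

	while (!of_parse_phandle_with_args(np, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec))
		i++;	/* register masterspec.args_count stream IDs here */

	return i;
}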
 drivers/iommu/arm-smmu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1d9ab39af29f..a3ce60887efa 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -48,7 +48,7 @@
 #include <asm/pgalloc.h>
 
 /* Maximum number of stream IDs assigned to a single device */
-#define MAX_MASTER_STREAMIDS		8
+#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS
 
 /* Maximum number of context banks per SMMU */
 #define ARM_SMMU_MAX_CBS		128

From 3a5df8ff35a28bfc0a3662a5906af2c98d4807d8 Mon Sep 17 00:00:00 2001
From: Andreas Herrmann
Date: Thu, 30 Jan 2014 18:18:04 +0000
Subject: [PATCH 2/6] iommu/arm-smmu: support buggy implementations with secure cfg accesses

In such a case we have to use secure aliases of some non-secure
registers.

This handling is switched on by the DT property
"calxeda,smmu-secure-config-access" for an SMMU node.

Signed-off-by: Andreas Herrmann
[will: merged with driver option handling patch]
Signed-off-by: Will Deacon
---
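[ed: context for the 0x400 below: the SMMU global address space banks
some configuration registers, keeping a second copy of each at +0x400
(nsCR0: 0x400, nsGFSR: 0x448, nsGFSYNR0: 0x450, as listed in the
comment in the diff). A sketch of what the new ARM_SMMU_GR0_NS() macro
expands to, written as a function for readability; smmu_gr0_cfg_base()
is a made-up name, not part of the patch:]

/* Sketch: equivalent of the ARM_SMMU_GR0_NS() macro added below. */
static void __iomem *smmu_gr0_cfg_base(struct arm_smmu_device *smmu)
{
	if (smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)
		return smmu->base + 0x400;	/* alias bank, e.g. nsCR0 */

	return smmu->base;			/* normal GR0 space, e.g. sCR0 */
}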
 drivers/iommu/arm-smmu.c | 58 +++++++++++++++++++++++++++++++++-------
 1 file changed, 48 insertions(+), 10 deletions(-)

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index a3ce60887efa..e04fdcb4b9ba 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -60,6 +60,16 @@
 #define ARM_SMMU_GR0(smmu)		((smmu)->base)
 #define ARM_SMMU_GR1(smmu)		((smmu)->base + (smmu)->pagesize)
 
+/*
+ * SMMU global address space with conditional offset to access secure
+ * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
+ * nsGFSYNR0: 0x450)
+ */
+#define ARM_SMMU_GR0_NS(smmu)						\
+	((smmu)->base +							\
+		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
+			? 0x400 : 0))
+
 /* Page table bits */
 #define ARM_SMMU_PTE_XN			(((pteval_t)3) << 53)
 #define ARM_SMMU_PTE_CONT		(((pteval_t)1) << 52)
@@ -351,6 +361,9 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
 #define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
 	u32				features;
+
+#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
+	u32				options;
 	int				version;
 	u32				num_context_banks;
@@ -401,6 +414,29 @@ struct arm_smmu_domain {
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
 static LIST_HEAD(arm_smmu_devices);
 
+struct arm_smmu_option_prop {
+	u32 opt;
+	const char *prop;
+};
+
+static struct arm_smmu_option_prop arm_smmu_options[] = {
+	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
+	{ 0, NULL},
+};
+
+static void parse_driver_options(struct arm_smmu_device *smmu)
+{
+	int i = 0;
+
+	do {
+		if (of_property_read_bool(smmu->dev->of_node,
+					  arm_smmu_options[i].prop)) {
+			smmu->options |= arm_smmu_options[i].opt;
+			dev_notice(smmu->dev, "option %s\n",
+				   arm_smmu_options[i].prop);
+		}
+	} while (arm_smmu_options[++i].opt);
+}
+
 static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
 						struct device_node *dev_node)
 {
@@ -614,16 +650,16 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
 {
 	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
 	struct arm_smmu_device *smmu = dev;
-	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
 
 	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
-	if (!gfsr)
-		return IRQ_NONE;
-
 	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
 	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
 	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
 
+	if (!gfsr)
+		return IRQ_NONE;
+
 	dev_err_ratelimited(smmu->dev,
 		"Unexpected global fault, this could be serious\n");
 	dev_err_ratelimited(smmu->dev,
@@ -1597,9 +1633,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 	int i = 0;
 	u32 reg;
 
-	/* Clear Global FSR */
-	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
-	writel(reg, gr0_base + ARM_SMMU_GR0_sGFSR);
+	/* clear global FSR */
+	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
+	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
 
 	/* Mark all SMRn as invalid and all S2CRn as bypass */
 	for (i = 0; i < smmu->num_mapping_groups; ++i) {
@@ -1619,7 +1655,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
 
-	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 
 	/* Enable fault reporting */
 	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
@@ -1638,7 +1674,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 
 	/* Push the button */
 	arm_smmu_tlb_sync(smmu);
-	writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sCR0);
+	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 }
 
 static int arm_smmu_id_size_to_bits(int size)
@@ -1885,6 +1921,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	if (err)
 		goto out_put_parent;
 
+	parse_driver_options(smmu);
+
 	if (smmu->version > 1 &&
 	    smmu->num_context_banks != smmu->num_context_irqs) {
 		dev_err(dev,
@@ -1969,7 +2007,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 		free_irq(smmu->irqs[i], smmu);
 
 	/* Turn the thing off */
-	writel_relaxed(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
+	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 	return 0;
 }

From 0c9d70d72453036195682e64efce77872e9f268b Mon Sep 17 00:00:00 2001
From: Andreas Herrmann
Date: Thu, 30 Jan 2014 20:17:52 +0000
Subject: [PATCH 3/6] documentation/iommu: update description of ARM System MMU binding

This patch adds descriptions for new properties of the device tree
binding for the ARM SMMU architecture. These properties control
arm-smmu driver options.

Cc: Grant Likely
Acked-by: Rob Herring
Signed-off-by: Andreas Herrmann
[will: removed device isolation property, as this has been dropped,
 and fixed up spacing in documentation]
Signed-off-by: Will Deacon
---
 Documentation/devicetree/bindings/iommu/arm,smmu.txt | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index e34c6cdd8ba8..f284b99402bc 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -48,6 +48,12 @@ conditions.
                   from the mmu-masters towards memory) node for this SMMU.
 
+- calxeda,smmu-secure-config-access : Enable proper handling of buggy
+                  implementations that always use secure access to
+                  SMMU configuration registers. In this case non-secure
+                  aliases of secure registers have to be used during
+                  SMMU configuration.
+
 Example:
 
         smmu {

From b410aed93288d0bd7650c4d17fd0f306b5082d6f Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 20 Feb 2014 16:31:06 +0000
Subject: [PATCH 4/6] iommu/arm-smmu: clean up use of `flags' in page table handling code

Commit 972157cac528 ("arm/smmu: Use irqsafe spinlock for domain lock")
fixed our page table locks to be the irq{save,restore} variants, since
the DMA mapping API can be invoked from interrupt context.

This patch cleans up our use of the flags variable so we can
distinguish between IRQ flags (now `flags') and pte protection bits
(now `prot').

Signed-off-by: Will Deacon
---
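[ed: the resulting convention, distilled into a sketch; the function
below is illustrative only and arm_smmu_map_sketch() is a made-up name.
After this rename, `flags' is reserved for the IRQ state word filled in
by spin_lock_irqsave(), while `prot' carries the IOMMU_READ/IOMMU_WRITE/
IOMMU_CACHE/IOMMU_EXEC protection bits down the page-table helpers:]

static int arm_smmu_map_sketch(struct arm_smmu_domain *smmu_domain,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	unsigned long flags;	/* IRQ state only, never protection bits */
	int ret;

	/* irqsave variant: the DMA mapping API may call us in IRQ context */
	spin_lock_irqsave(&smmu_domain->lock, flags);

	/* ... allocate and initialise the tables, passing 'prot' down ... */
	ret = 0;

	spin_unlock_irqrestore(&smmu_domain->lock, flags);
	return ret;
}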
 drivers/iommu/arm-smmu.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e04fdcb4b9ba..83297fe0878d 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1254,7 +1254,7 @@ static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
 
 static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 				   unsigned long addr, unsigned long end,
-				   unsigned long pfn, int flags, int stage)
+				   unsigned long pfn, int prot, int stage)
 {
 	pte_t *pte, *start;
 	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
@@ -1276,28 +1276,28 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 	if (stage == 1) {
 		pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
-		if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ))
+		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
 			pteval |= ARM_SMMU_PTE_AP_RDONLY;
 
-		if (flags & IOMMU_CACHE)
+		if (prot & IOMMU_CACHE)
 			pteval |= (MAIR_ATTR_IDX_CACHE <<
 				   ARM_SMMU_PTE_ATTRINDX_SHIFT);
 	} else {
 		pteval |= ARM_SMMU_PTE_HAP_FAULT;
-		if (flags & IOMMU_READ)
+		if (prot & IOMMU_READ)
 			pteval |= ARM_SMMU_PTE_HAP_READ;
-		if (flags & IOMMU_WRITE)
+		if (prot & IOMMU_WRITE)
 			pteval |= ARM_SMMU_PTE_HAP_WRITE;
-		if (flags & IOMMU_CACHE)
+		if (prot & IOMMU_CACHE)
 			pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
 		else
 			pteval |= ARM_SMMU_PTE_MEMATTR_NC;
 	}
 
 	/* If no access, create a faulting entry to avoid TLB fills */
-	if (flags & IOMMU_EXEC)
+	if (prot & IOMMU_EXEC)
 		pteval &= ~ARM_SMMU_PTE_XN;
-	else if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
+	else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
 		pteval &= ~ARM_SMMU_PTE_PAGE;
 
 	pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1359,7 +1359,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 
 static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
 				   unsigned long addr, unsigned long end,
-				   phys_addr_t phys, int flags, int stage)
+				   phys_addr_t phys, int prot, int stage)
 {
 	int ret;
 	pmd_t *pmd;
@@ -1383,7 +1383,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
 	do {
 		next = pmd_addr_end(addr, end);
 		ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
-					      flags, stage);
+					      prot, stage);
 		phys += next - addr;
 	} while (pmd++, addr = next, addr < end);
 
@@ -1392,7 +1392,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
 
 static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
 				   unsigned long addr, unsigned long end,
-				   phys_addr_t phys, int flags, int stage)
+				   phys_addr_t phys, int prot, int stage)
 {
 	int ret = 0;
 	pud_t *pud;
@@ -1416,7 +1416,7 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
 	do {
 		next = pud_addr_end(addr, end);
 		ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
-					      flags, stage);
+					      prot, stage);
 		phys += next - addr;
 	} while (pud++, addr = next, addr < end);
 
@@ -1425,7 +1425,7 @@
 
 static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 				   unsigned long iova, phys_addr_t paddr,
-				   size_t size, int flags)
+				   size_t size, int prot)
 {
 	int ret, stage;
 	unsigned long end;
@@ -1433,7 +1433,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
 	pgd_t *pgd = root_cfg->pgd;
 	struct arm_smmu_device *smmu = root_cfg->smmu;
-	unsigned long irqflags;
+	unsigned long flags;
 
 	if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
 		stage = 2;
@@ -1456,14 +1456,14 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	if (paddr & ~output_mask)
 		return -ERANGE;
 
-	spin_lock_irqsave(&smmu_domain->lock, irqflags);
+	spin_lock_irqsave(&smmu_domain->lock, flags);
 	pgd += pgd_index(iova);
 	end = iova + size;
 	do {
 		unsigned long next = pgd_addr_end(iova, end);
 
 		ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
-					      flags, stage);
+					      prot, stage);
 		if (ret)
 			goto out_unlock;
 
@@ -1472,13 +1472,13 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	} while (pgd++, iova != end);
 
 out_unlock:
-	spin_unlock_irqrestore(&smmu_domain->lock, irqflags);
+	spin_unlock_irqrestore(&smmu_domain->lock, flags);
 
 	return ret;
 }
 
 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
-			phys_addr_t paddr, size_t size, int flags)
+			phys_addr_t paddr, size_t size, int prot)
 {
 	struct arm_smmu_domain *smmu_domain = domain->priv;
 
@@ -1489,7 +1489,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 	if ((phys_addr_t)iova & ~smmu_domain->output_mask)
 		return -ERANGE;
 
-	return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags);
+	return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot);
 }
 
 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,

From 3aa80ea4c90d46ffbe200d05b9ceb997001b36df Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 5 Feb 2014 23:35:47 +0000
Subject: [PATCH 5/6] iommu/arm-smmu: provide option to dsb macro when publishing tables

On coherent systems, publishing new page tables to the SMMU walker is
achieved with a dsb instruction. In fact, this can be a dsb(ishst),
which also provides the mandatory barrier option for arm64.

Acked-by: Catalin Marinas
Signed-off-by: Will Deacon
---
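[ed: a sketch of why the weaker barrier is sufficient;
publish_pgtable_sketch() is a made-up name, illustrative only. A bare
dsb() orders all memory accesses against the whole system, whereas
dsb(ishst) only waits for stores to complete within the inner shareable
domain. A cache-coherent SMMU walker is an observer in that domain, so
completing the PTE stores there is enough; and since the arm64 dsb
macro requires an explicit option argument, spelling it dsb(ishst)
keeps one definition working on both arm and arm64:]

/* Sketch: publish newly written PTEs to a coherent table walker. */
static void publish_pgtable_sketch(struct arm_smmu_device *smmu)
{
	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		dsb(ishst);	/* stores only, inner shareable domain */
}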
 drivers/iommu/arm-smmu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 83297fe0878d..1da5b41afc31 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -678,7 +678,7 @@ static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
 
 	/* Ensure new page tables are visible to the hardware walker */
 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
-		dsb();
+		dsb(ishst);
 	} else {
 		/*
 		 * If the SMMU can't walk tables in the CPU caches, treat them

From 34fb4b37b7b6da7dc34797d1abf234dd30b091d8 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 26 Feb 2014 11:14:37 +0000
Subject: [PATCH 6/6] iommu/arm-smmu: fix incorrect comment regarding TLB invalidation

Commit 1463fe44fd0f ("iommu/arm-smmu: Don't use VMIDs for stage-1
translations") moved our TLB invalidation from context creation time to
context destruction time, but forgot to update an associated comment.

This patch fixes the broken comment.

Signed-off-by: Will Deacon
---
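[ed: SCTLR here is the per-context-bank system control register; its M
bit (bit 0) enables translation for that context bank. A sketch of the
invariant the corrected comment relies on, using register and macro
names from arm-smmu.c of this era; cb_translation_enabled() itself is
made up for illustration:]

/*
 * Sketch: with SCTLR.M clear the context bank performs no table walks,
 * so its page tables can be freed without further TLB maintenance.
 */
static bool cb_translation_enabled(struct arm_smmu_device *smmu, int idx)
{
	void __iomem *cb_base = ARM_SMMU_CB_BASE(smmu) +
				ARM_SMMU_CB(smmu, idx);

	return readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR) & SCTLR_M;
}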
 drivers/iommu/arm-smmu.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1da5b41afc31..8b89e33a89fe 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1026,9 +1026,8 @@ static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
 
 	/*
 	 * Recursively free the page tables for this domain. We don't
-	 * care about speculative TLB filling, because the TLB will be
-	 * nuked next time this context bank is re-allocated and no devices
-	 * currently map to these tables.
+	 * care about speculative TLB filling because the tables should
+	 * not be active in any context bank at this point (SCTLR.M is 0).
 	 */
 	pgd = pgd_base;
 	for (i = 0; i < PTRS_PER_PGD; ++i) {