iommu/arm-smmu-qcom: Use a custom context fault handler for sdm845
The sdm845 platform now supports TBUs, so let's get additional debug
info from the TBUs when a context fault occurs. Implement a custom
context fault handler that does a software page table walk, plus
hardware (ATOS) walks before and after a TLB Invalidate All, and logs
the results to help pinpoint the cause of the fault.

Signed-off-by: Georgi Djakov <quic_c_gdjako@quicinc.com>
Link: https://lore.kernel.org/r/20240417133731.2055383-5-quic_c_gdjako@quicinc.com
Signed-off-by: Will Deacon <will@kernel.org>
Georgi Djakov authored and Will Deacon committed Apr 18, 2024
1 parent 960be6e commit d374555
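
For context on how this handler interacts with client drivers: the report_iommu_fault() calls in the new code dispatch to whatever per-domain fault handler a client has registered, and the return value (0 or -EBUSY meaning "handled", -ENOSYS meaning no handler) selects between the quiet path and the verbose TBU/ATOS debug path. Below is a minimal, hypothetical sketch of such a client-side handler using the generic IOMMU API; the my_dev_* names are illustrative and not part of this commit.

#include <linux/device.h>
#include <linux/iommu.h>

/*
 * Hypothetical client fault handler. Returning 0 (or -EBUSY to keep the
 * transaction stalled for further debugging) tells qcom_smmu_context_fault()
 * that the fault was handled; any other return value takes the "Unhandled
 * context fault" path with the extra TBU/ATOS debug output.
 */
static int my_dev_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags, void *token)
{
	/* The SMMU driver passes a NULL dev here, so rely on the token. */
	struct device *client = token;

	dev_warn(client, "IOMMU %s fault at IOVA %#lx\n",
		 (flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);

	return 0;	/* handled; the SMMU driver terminates the transaction */
}

/* Registration, e.g. at probe time, against the device's IOMMU domain. */
static void my_dev_setup_fault_handler(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (domain)
		iommu_set_fault_handler(domain, my_dev_iommu_fault, dev);
}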
Showing 2 changed files with 147 additions and 0 deletions.
143 changes: 143 additions & 0 deletions drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -345,6 +345,149 @@ static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain,
	return phys;
}

static phys_addr_t qcom_smmu_iova_to_phys_hard(struct arm_smmu_domain *smmu_domain, dma_addr_t iova)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;
	u32 frsynra;
	u16 sid;

	frsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
	sid = FIELD_GET(ARM_SMMU_CBFRSYNRA_SID, frsynra);

	return qcom_iova_to_phys(smmu_domain, iova, sid);
}

static phys_addr_t qcom_smmu_verify_fault(struct arm_smmu_domain *smmu_domain, dma_addr_t iova, u32 fsr)
{
	struct io_pgtable *iop = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	phys_addr_t phys_post_tlbiall;
	phys_addr_t phys;

	phys = qcom_smmu_iova_to_phys_hard(smmu_domain, iova);
	io_pgtable_tlb_flush_all(iop);
	phys_post_tlbiall = qcom_smmu_iova_to_phys_hard(smmu_domain, iova);

	if (phys != phys_post_tlbiall) {
		dev_err(smmu->dev,
			"ATOS results differed across TLBIALL... (before: %pa after: %pa)\n",
			&phys, &phys_post_tlbiall);
	}

	return (phys == 0 ? phys_post_tlbiall : phys);
}

irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
{
	struct arm_smmu_domain *smmu_domain = dev;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	u32 fsr, fsynr, cbfrsynra, resume = 0;
	int idx = smmu_domain->cfg.cbndx;
	phys_addr_t phys_soft;
	unsigned long iova;
	int ret, tmp;

	static DEFINE_RATELIMIT_STATE(_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	if (list_empty(&tbu_list)) {
		ret = report_iommu_fault(&smmu_domain->domain, NULL, iova,
					 fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);

		if (ret == -ENOSYS)
			dev_err_ratelimited(smmu->dev,
					    "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
					    fsr, iova, fsynr, cbfrsynra, idx);

		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
		return IRQ_HANDLED;
	}

	phys_soft = ops->iova_to_phys(ops, iova);

	tmp = report_iommu_fault(&smmu_domain->domain, NULL, iova,
				 fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
	if (!tmp || tmp == -EBUSY) {
		dev_dbg(smmu->dev,
			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
			iova, fsr, fsynr, idx);
		dev_dbg(smmu->dev, "soft iova-to-phys=%pa\n", &phys_soft);
		ret = IRQ_HANDLED;
		resume = ARM_SMMU_RESUME_TERMINATE;
	} else {
		phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, iova, fsr);

		if (__ratelimit(&_rs)) {
			dev_err(smmu->dev,
				"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
				fsr, iova, fsynr, cbfrsynra, idx);
			dev_err(smmu->dev,
				"FSR = %08x [%s%s%s%s%s%s%s%s%s], SID=0x%x\n",
				fsr,
				(fsr & 0x02) ? "TF " : "",
				(fsr & 0x04) ? "AFF " : "",
				(fsr & 0x08) ? "PF " : "",
				(fsr & 0x10) ? "EF " : "",
				(fsr & 0x20) ? "TLBMCF " : "",
				(fsr & 0x40) ? "TLBLKF " : "",
				(fsr & 0x80) ? "MHF " : "",
				(fsr & 0x40000000) ? "SS " : "",
				(fsr & 0x80000000) ? "MULTI " : "",
				cbfrsynra);

			dev_err(smmu->dev,
				"soft iova-to-phys=%pa\n", &phys_soft);
			if (!phys_soft)
				dev_err(smmu->dev,
					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
					dev_name(smmu->dev));
			if (phys_atos)
				dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
					&phys_atos);
			else
				dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
		}
		ret = IRQ_NONE;
		resume = ARM_SMMU_RESUME_TERMINATE;
	}

	/*
	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
	 * if stalled. This is required to keep the IOMMU client stalled on
	 * the outstanding fault. This gives the client a chance to take any
	 * debug action and then terminate the stalled transaction.
	 * So, the sequence in case of stall on fault should be:
	 * 1) Do not clear FSR or write to RESUME here
	 * 2) Client takes any debug action
	 * 3) Client terminates the stalled transaction and resumes the IOMMU
	 * 4) Client clears FSR. The FSR should only be cleared after 3) and
	 *    not before so that the fault remains outstanding. This ensures
	 *    SCTLR.HUPCF has the desired effect if subsequent transactions also
	 *    need to be terminated.
	 */
	if (tmp != -EBUSY) {
		/* Clear the faulting FSR */
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);

		/* Retry or terminate any stalled transactions */
		if (fsr & ARM_SMMU_FSR_SS)
			arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, resume);
	}

	return ret;
}

static int qcom_tbu_probe(struct platform_device *pdev)
{
	struct of_phandle_args args = { .args_count = 2 };
4 changes: 4 additions & 0 deletions drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -422,6 +422,10 @@ static const struct arm_smmu_impl sdm845_smmu_500_impl = {
	.reset = qcom_sdm845_smmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
	.tlb_sync = qcom_smmu_tlb_sync,
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
	.context_fault = qcom_smmu_context_fault,
	.context_fault_needs_threaded_irq = true,
#endif
};

static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
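
One note on the new .context_fault_needs_threaded_irq flag: the debug handler ends up in code paths (TBU power management, ATOS polling) that can sleep, so it must not run in hard-IRQ context. The arm-smmu core is therefore expected to request a threaded interrupt when this flag is set. The snippet below is a simplified illustration of that wiring, not the verbatim arm-smmu.c code; example_request_context_irq() is a made-up name, while arm_smmu_context_fault and the devm_request_*irq() helpers are existing kernel symbols.

/*
 * Simplified illustration: request the context-bank interrupt as a threaded
 * IRQ when the implementation asks for it, so the fault handler is allowed
 * to sleep (e.g. while powering up TBUs or polling ATOS registers).
 */
static int example_request_context_irq(struct arm_smmu_device *smmu, int irq,
					struct arm_smmu_domain *smmu_domain)
{
	irq_handler_t handler = (smmu->impl && smmu->impl->context_fault) ?
				smmu->impl->context_fault : arm_smmu_context_fault;

	if (smmu->impl && smmu->impl->context_fault_needs_threaded_irq)
		return devm_request_threaded_irq(smmu->dev, irq, NULL, handler,
						 IRQF_ONESHOT | IRQF_SHARED,
						 "arm-smmu-context-fault",
						 smmu_domain);

	return devm_request_irq(smmu->dev, irq, handler, IRQF_SHARED,
				"arm-smmu-context-fault", smmu_domain);
}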
