PCI: keystone: Add app register accessors
Add device-specific register accessors for consistency across host drivers.
No functional change intended.

Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Bjorn Helgaas committed Oct 12, 2016
1 parent e481e0d commit 5c72535
Showing 1 changed file with 41 additions and 31 deletions.
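
The pattern the patch introduces is simple: instead of open-coding readl()/writel() against the ioremap'd application register base at every call site, the driver gains a pair of thin accessors that take a register offset. Below is a minimal sketch of that pattern for orientation; the struct is trimmed to the single field the accessors need (the real keystone_pcie definition in the driver has many more members), and only the two accessor functions are taken verbatim from the diff that follows.

#include <linux/io.h>

/* Trimmed for illustration; the full definition lives in the driver. */
struct keystone_pcie {
	void __iomem *va_app_base;	/* application register space */
};

/* Device-specific accessors: callers pass a register offset instead of
 * computing va_app_base + offset at every call site. */
static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
	return readl(ks_pcie->va_app_base + offset);
}

static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
{
	writel(val, ks_pcie->va_app_base + offset);
}

Call sites then change mechanically, e.g. readl(ks_pcie->va_app_base + CMD_STATUS) becomes ks_dw_app_readl(ks_pcie, CMD_STATUS); the generated register accesses are identical, which is why no functional change is expected.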
drivers/pci/host/pci-keystone-dw.c
@@ -88,14 +88,24 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
 	return ks_pcie->app.start + MSI_IRQ;
 }
 
+static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
+{
+	return readl(ks_pcie->va_app_base + offset);
+}
+
+static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
+{
+	writel(val, ks_pcie->va_app_base + offset);
+}
+
 void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
 {
 	struct pcie_port *pp = &ks_pcie->pp;
 	struct device *dev = pp->dev;
 	u32 pending, vector;
 	int src, virq;
 
-	pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));
+	pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
 
 	/*
 	 * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
@@ -125,9 +135,9 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
 	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 	update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
 
-	writel(BIT(bit_pos),
-	       ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
-	writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
+	ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
+			 BIT(bit_pos));
+	ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
 }
 
 void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
@@ -136,8 +146,8 @@ void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
 
 	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
-	writel(BIT(bit_pos),
-	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
+	ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
+			 BIT(bit_pos));
 }
 
 void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
@@ -146,8 +156,8 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
 
 	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
-	writel(BIT(bit_pos),
-	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
+	ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
+			 BIT(bit_pos));
 }
 
 static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
@@ -239,7 +249,7 @@ void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
 	int i;
 
 	for (i = 0; i < MAX_LEGACY_IRQS; i++)
-		writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
+		ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
 }
 
 void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
@@ -249,7 +259,7 @@ void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
 	u32 pending;
 	int virq;
 
-	pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));
+	pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
 
 	if (BIT(0) & pending) {
 		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
@@ -258,20 +268,19 @@ void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
 	}
 
 	/* EOI the INTx interrupt */
-	writel(offset, ks_pcie->va_app_base + IRQ_EOI);
+	ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
 }
 
 void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
 {
-	writel(ERR_IRQ_ALL, ks_pcie->va_app_base + ERR_IRQ_ENABLE_SET);
+	ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
 }
 
 irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
 {
 	u32 status;
 
-	status = readl(ks_pcie->va_app_base + ERR_IRQ_STATUS_RAW) &
-		 ERR_IRQ_ALL;
+	status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
 	if (!status)
 		return IRQ_NONE;
 
@@ -280,7 +289,7 @@ irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
 		status);
 
 	/* Ack the IRQ; status bits are RW1C */
-	writel(status, ks_pcie->va_app_base + ERR_IRQ_STATUS);
+	ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
 	return IRQ_HANDLED;
 }
 
@@ -329,11 +338,11 @@ static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
 {
 	u32 val;
 
-	writel(DBI_CS2_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
-	       ks_pcie->va_app_base + CMD_STATUS);
+	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+	ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);
 
 	do {
-		val = readl(ks_pcie->va_app_base + CMD_STATUS);
+		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
 	} while (!(val & DBI_CS2_EN_VAL));
 }
 
@@ -347,11 +356,11 @@ static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
 {
 	u32 val;
 
-	writel(~DBI_CS2_EN_VAL & readl(ks_pcie->va_app_base + CMD_STATUS),
-	       ks_pcie->va_app_base + CMD_STATUS);
+	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+	ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);
 
 	do {
-		val = readl(ks_pcie->va_app_base + CMD_STATUS);
+		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
 	} while (val & DBI_CS2_EN_VAL);
 }
 
@@ -360,6 +369,7 @@ void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
 	struct pcie_port *pp = &ks_pcie->pp;
 	u32 start = pp->mem->start, end = pp->mem->end;
 	int i, tr_size;
+	u32 val;
 
 	/* Disable BARs for inbound access */
 	ks_dw_pcie_set_dbi_mode(ks_pcie);
@@ -368,20 +378,20 @@ void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
 	ks_dw_pcie_clear_dbi_mode(ks_pcie);
 
 	/* Set outbound translation size per window division */
-	writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);
+	ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);
 
 	tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
 
 	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
 	for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
-		writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
-		writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
+		ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
+		ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
 		start += tr_size;
 	}
 
 	/* Enable OB translation */
-	writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
-	       ks_pcie->va_app_base + CMD_STATUS);
+	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+	ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
 }
 
 /**
@@ -421,7 +431,7 @@ static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
 	if (bus != 1)
 		regval |= BIT(24);
 
-	writel(regval, ks_pcie->va_app_base + CFG_SETUP);
+	ks_dw_app_writel(ks_pcie, CFG_SETUP, regval);
 	return pp->va_cfg0_base;
 }
 
@@ -490,13 +500,13 @@ void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
 	u32 val;
 
 	/* Disable Link training */
-	val = readl(ks_pcie->va_app_base + CMD_STATUS);
+	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
 	val &= ~LTSSM_EN_VAL;
-	writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
+	ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
 
 	/* Initiate Link Training */
-	val = readl(ks_pcie->va_app_base + CMD_STATUS);
-	writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
+	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+	ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
 }
 
 /**