irqchip/riscv-imsic: Special handling for non-atomic device MSI update
Devices with a non-atomic MSI update might see an intermediate state when
the target IMSIC vector is changed from one CPU to another.

To avoid losing interrupts due to this intermediate state, do the following,
just like the x86 APIC does:

 1) First write a temporary IMSIC vector to the device: it has the same
    MSI address as the old IMSIC vector, but its MSI data points to the
    new IMSIC vector.

 2) Next write the new IMSIC vector to the device.

Based on the above, __imsic_local_sync() must check the pending status of
both the old and the new MSI data on the old CPU. In addition, the movement
of the IMSIC vector for devices with a non-atomic MSI update must be done in
interrupt context using IRQCHIP_MOVE_DEFERRED.

Implement the logic and enforce the chip flag for PCI/MSI[X].

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20250217085657.789309-11-apatel@ventanamicro.com
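
For illustration only (not part of this patch), here is a minimal user-space C
sketch of the update order described above. The device model and every name in
it (fake_msi_regs, retarget_msi) are hypothetical stand-ins for a device whose
MSI address and MSI data are programmed by two separate register writes:

/*
 * Hypothetical device whose MSI update is non-atomic: the MSI address
 * and MSI data live in separate registers, so between the two writes
 * the device may raise an interrupt built from a mix of old and new
 * values.
 */
#include <stdint.h>

struct fake_msi_regs {
	uint64_t addr;	/* MSI address: selects the target CPU's IMSIC file */
	uint32_t data;	/* MSI data: local interrupt identity on that IMSIC */
};

/* Retarget the device without ever exposing (new address, old data). */
static void retarget_msi(struct fake_msi_regs *dev,
			 uint64_t new_addr, uint32_t new_data)
{
	/*
	 * Step 1: temporary vector = old MSI address + new MSI data.
	 * An interrupt raised at this point is delivered to the new
	 * local ID on the *old* CPU, which __imsic_local_sync() now
	 * also checks for pending bits.
	 */
	dev->data = new_data;

	/* Step 2: move the MSI address over to the new CPU's IMSIC. */
	dev->addr = new_addr;
}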
Anup Patel authored and Thomas Gleixner committed Feb 20, 2025
1 parent 0bd5508 commit 896f8e4
Showing 2 changed files with 102 additions and 15 deletions.
87 changes: 76 additions & 11 deletions drivers/irqchip/irq-riscv-imsic-platform.c
@@ -64,6 +64,11 @@ static int imsic_irq_retrigger(struct irq_data *d)
return 0;
}

static void imsic_irq_ack(struct irq_data *d)
{
irq_move_irq(d);
}

static void imsic_irq_compose_vector_msg(struct imsic_vector *vec, struct msi_msg *msg)
{
phys_addr_t msi_addr;
@@ -97,6 +102,21 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
bool force)
{
struct imsic_vector *old_vec, *new_vec;
struct imsic_vector tmp_vec;

/*
* Requirements for the downstream irqdomains (or devices):
*
* 1) Downstream irqdomains (or devices) with atomic MSI update can
* happily do imsic_irq_set_affinity() in the process-context on
* any CPU so the irqchip of such irqdomains must not set the
* IRQCHIP_MOVE_DEFERRED flag.
*
* 2) Downstream irqdomains (or devices) with non-atomic MSI update
* must use imsic_irq_set_affinity() in interrupt-context upon
* the next device interrupt so the irqchip of such irqdomains
* must set the IRQCHIP_MOVE_DEFERRED flag.
*/

old_vec = irq_data_get_irq_chip_data(d);
if (WARN_ON(!old_vec))
@@ -115,6 +135,32 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
if (!new_vec)
return -ENOSPC;

/*
* Device having non-atomic MSI update might see an intermediate
* state when changing target IMSIC vector from one CPU to another.
*
* To avoid losing interrupt to such intermediate state, do the
* following (just like x86 APIC):
*
* 1) First write a temporary IMSIC vector to the device which
* has MSI address same as the old IMSIC vector but MSI data
* matches the new IMSIC vector.
*
* 2) Next write the new IMSIC vector to the device.
*
* Based on the above, __imsic_local_sync() must check pending
* status of both old MSI data and new MSI data on the old CPU.
*/
if (!irq_can_move_in_process_context(d) &&
new_vec->local_id != old_vec->local_id) {
/* Setup temporary vector */
tmp_vec.cpu = old_vec->cpu;
tmp_vec.local_id = new_vec->local_id;

/* Point device to the temporary vector */
imsic_msi_update_msg(irq_get_irq_data(d->irq), &tmp_vec);
}

/* Point device to the new vector */
imsic_msi_update_msg(irq_get_irq_data(d->irq), new_vec);

@@ -163,17 +209,17 @@ static void imsic_irq_force_complete_move(struct irq_data *d)
#endif

static struct irq_chip imsic_irq_base_chip = {
.name = "IMSIC",
.irq_mask = imsic_irq_mask,
.irq_unmask = imsic_irq_unmask,
.name = "IMSIC",
.irq_mask = imsic_irq_mask,
.irq_unmask = imsic_irq_unmask,
#ifdef CONFIG_SMP
-.irq_set_affinity = imsic_irq_set_affinity,
-.irq_force_complete_move = imsic_irq_force_complete_move,
+.irq_set_affinity = imsic_irq_set_affinity,
+.irq_force_complete_move = imsic_irq_force_complete_move,
#endif
-.irq_retrigger = imsic_irq_retrigger,
-.irq_compose_msi_msg = imsic_irq_compose_msg,
-.flags = IRQCHIP_SKIP_SET_WAKE |
-IRQCHIP_MASK_ON_SUSPEND,
+.irq_retrigger = imsic_irq_retrigger,
+.irq_ack = imsic_irq_ack,
+.irq_compose_msi_msg = imsic_irq_compose_msg,
+.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
};

static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -190,7 +236,7 @@ static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return -ENOSPC;

irq_domain_set_info(domain, virq, virq, &imsic_irq_base_chip, vec,
-handle_simple_irq, NULL, NULL);
+handle_edge_irq, NULL, NULL);
irq_set_noprobe(virq);
irq_set_affinity(virq, cpu_online_mask);
irq_data_update_effective_affinity(irq_get_irq_data(virq), cpumask_of(vec->cpu));
@@ -229,15 +275,34 @@ static const struct irq_domain_ops imsic_base_domain_ops = {
#endif
};

static bool imsic_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
struct irq_domain *real_parent, struct msi_domain_info *info)
{
if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
return false;

switch (info->bus_token) {
case DOMAIN_BUS_PCI_DEVICE_MSI:
case DOMAIN_BUS_PCI_DEVICE_MSIX:
info->chip->flags |= IRQCHIP_MOVE_DEFERRED;
break;
default:
break;
}

return true;
}

static const struct msi_parent_ops imsic_msi_parent_ops = {
.supported_flags = MSI_GENERIC_FLAGS_MASK |
MSI_FLAG_PCI_MSIX,
.required_flags = MSI_FLAG_USE_DEF_DOM_OPS |
MSI_FLAG_USE_DEF_CHIP_OPS |
MSI_FLAG_PCI_MSI_MASK_PARENT,
.chip_flags = MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
-.init_dev_msi_info = msi_lib_init_dev_msi_info,
+.init_dev_msi_info = imsic_init_dev_msi_info,
};

int imsic_irqdomain_init(void)
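A rough sketch of the resulting call flow, assuming the generic IRQ core
behaviour for IRQCHIP_MOVE_DEFERRED (not part of this diff): once the
per-device PCI/MSI[X] irqchip carries IRQCHIP_MOVE_DEFERRED and
MSI_CHIP_FLAG_SET_ACK routes the ack to the parent, an affinity change
requested from process context is only recorded as pending and is applied
from the next device interrupt, roughly:

handle_edge_irq()
    -> chip->irq_ack()                  /* imsic_irq_ack() via irq_chip_ack_parent */
        -> irq_move_irq(d)              /* applies the pending affinity change     */
            -> imsic_irq_set_affinity() /* now runs in interrupt context           */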
30 changes: 26 additions & 4 deletions drivers/irqchip/irq-riscv-imsic-state.c
@@ -126,8 +126,8 @@ void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend,

static bool __imsic_local_sync(struct imsic_local_priv *lpriv)
{
-struct imsic_local_config *mlocal;
-struct imsic_vector *vec, *mvec;
+struct imsic_local_config *tlocal, *mlocal;
+struct imsic_vector *vec, *tvec, *mvec;
bool ret = true;
int i;

@@ -169,13 +169,35 @@ static bool __imsic_local_sync(struct imsic_local_priv *lpriv)
*/
mvec = READ_ONCE(vec->move_next);
if (mvec) {
-if (__imsic_id_read_clear_pending(i)) {
/*
* Devices having non-atomic MSI update might see
* an intermediate state so check both old ID and
* new ID for pending interrupts.
*
* For details, see imsic_irq_set_affinity().
*/
tvec = vec->local_id == mvec->local_id ?
NULL : &lpriv->vectors[mvec->local_id];

if (tvec && !irq_can_move_in_process_context(irq_get_irq_data(vec->irq)) &&
__imsic_id_read_clear_pending(tvec->local_id)) {
/* Retrigger temporary vector if it was already in-use */
if (READ_ONCE(tvec->enable)) {
tlocal = per_cpu_ptr(imsic->global.local, tvec->cpu);
writel_relaxed(tvec->local_id, tlocal->msi_va);
}

mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
writel_relaxed(mvec->local_id, mlocal->msi_va);
}

+if (__imsic_id_read_clear_pending(vec->local_id)) {
mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
writel_relaxed(mvec->local_id, mlocal->msi_va);
}

WRITE_ONCE(vec->move_next, NULL);
-imsic_vector_free(&lpriv->vectors[i]);
+imsic_vector_free(vec);
}

skip:
