diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 6272962ea077f..6e6e0a1c12b59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -850,14 +850,29 @@ static int mlx5_cpumask_default_spread(int numa_node, int index)
 	return found_cpu;
 }
 
+static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
+{
+#ifdef CONFIG_RFS_ACCEL
+#ifdef CONFIG_MLX5_SF
+	if (mlx5_core_is_sf(dev))
+		return dev->priv.parent_mdev->priv.eq_table->rmap;
+#endif
+	return dev->priv.eq_table->rmap;
+#else
+	return NULL;
+#endif
+}
+
 static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
+	struct cpu_rmap *rmap;
 	struct mlx5_irq *irq;
 	int cpu;
 
+	rmap = mlx5_eq_table_get_pci_rmap(dev);
 	cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
-	irq = mlx5_irq_request_vector(dev, cpu, vecidx, &table->rmap);
+	irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
 	if (IS_ERR(irq))
 		return PTR_ERR(irq);
 
@@ -883,8 +898,13 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
 	struct mlx5_irq *irq;
 
 	irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx);
-	if (IS_ERR(irq))
+	if (IS_ERR(irq)) {
+		/* In case the SF IRQ pool does not exist, fall back to the PF IRQs */
+		if (PTR_ERR(irq) == -ENOENT)
+			return comp_irq_request_pci(dev, vecidx);
+
 		return PTR_ERR(irq);
+	}
 
 	return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index ed51800f9f672..047d5fed5f89e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -191,17 +191,13 @@ struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
 	struct irq_affinity_desc af_desc = {};
 	struct mlx5_irq *irq;
 
+	if (!mlx5_irq_pool_is_sf_pool(pool))
+		return ERR_PTR(-ENOENT);
+
 	af_desc.is_managed = 1;
 	cpumask_copy(&af_desc.mask, cpu_online_mask);
 	cpumask_andnot(&af_desc.mask, &af_desc.mask, used_cpus);
-	if (mlx5_irq_pool_is_sf_pool(pool))
-		irq = mlx5_irq_affinity_request(pool, &af_desc);
-	else
-		/* In case SF pool doesn't exists, fallback to the PF IRQs.
-		 * The PF IRQs are already allocated and binded to CPU
-		 * at this point. Hence, only an index is needed.
-		 */
-		irq = mlx5_irq_request(dev, vecidx, NULL, NULL);
+	irq = mlx5_irq_affinity_request(pool, &af_desc);
 	if (IS_ERR(irq))
 		return irq;
 
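
For reference, below is a minimal, compilable userspace sketch of the control-flow contract this patch introduces: the pool-side function signals "no SF pool" with ERR_PTR(-ENOENT), and the caller treats exactly that error code as "retry via the PF path" while propagating any other error. All names here (fake_irq, sf_pool_request, pf_request, request_comp_irq) are illustrative stand-ins, not the mlx5 driver's real symbols, and the ERR_PTR/IS_ERR/PTR_ERR helpers are simplified userspace equivalents of the kernel's.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified userspace stand-ins for the kernel's ERR_PTR helpers. */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-4095;
}

struct fake_irq { int index; };

static struct fake_irq pf_irq;

/* Pool-side request: fails with -ENOENT when no SF pool exists,
 * mirroring the new early return in mlx5_irq_affinity_irq_request_auto().
 */
static void *sf_pool_request(int have_sf_pool, int vecidx)
{
	static struct fake_irq sf_irq;

	if (!have_sf_pool)
		return ERR_PTR(-ENOENT);
	sf_irq.index = vecidx;
	return &sf_irq;
}

/* PF-path request, standing in for comp_irq_request_pci(). */
static void *pf_request(int vecidx)
{
	pf_irq.index = vecidx;
	return &pf_irq;
}

/* Caller-side fallback, mirroring comp_irq_request_sf(): only -ENOENT
 * means "no SF pool, use the PF path"; any other error is propagated.
 */
static void *request_comp_irq(int have_sf_pool, int vecidx)
{
	void *irq = sf_pool_request(have_sf_pool, vecidx);

	if (IS_ERR(irq)) {
		if (PTR_ERR(irq) == -ENOENT)
			return pf_request(vecidx);

		return irq;
	}
	return irq;
}

int main(void)
{
	void *irq = request_comp_irq(/* have_sf_pool = */ 0, 3);

	if (!IS_ERR(irq))
		printf("got IRQ via PF fallback, index %d\n",
		       ((struct fake_irq *)irq)->index);
	return 0;
}

The point of moving the fallback from irq_affinity.c up into comp_irq_request_sf() is visible in the diff itself: the fallback now goes through the full comp_irq_request_pci() path, which resolves the correct cpu_rmap via the new mlx5_eq_table_get_pci_rmap() helper (the parent PF's rmap when the device is an SF), whereas the old in-place fallback called mlx5_irq_request() with a NULL rmap and bypassed that setup.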