net/mlx5: Add IRQ vector to CPU lookup function
Currently, once driver load completes, an IRQ has been requested for every
completion vector. However, as we move to support dynamic creation of EQs,
this will no longer be the case, since some IRQs will not exist at that
stage. In such a case, fall back to the default CPU-to-IRQ mapping, which is
the serial mapping based on IRQ vector index: the n'th vector is mapped to
the n'th CPU.
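
The following is only an illustrative sketch of such a serialized spread,
assuming the default mapping prefers CPUs on the device's NUMA node before
spilling over to the remaining online CPUs; it is not the driver's
mlx5_cpumask_default_spread() implementation, and example_default_spread()
is a hypothetical name:

    #include <linux/cpumask.h>
    #include <linux/numa.h>
    #include <linux/topology.h>

    /* Illustrative sketch only -- not the driver's implementation of
     * mlx5_cpumask_default_spread(): map vector n to the n'th online CPU,
     * trying CPUs local to the device's NUMA node first.
     */
    static int example_default_spread(int numa_node, unsigned int vector)
    {
            const struct cpumask *local;
            unsigned int i = 0;
            int cpu;

            local = numa_node == NUMA_NO_NODE ? cpu_online_mask :
                                                cpumask_of_node(numa_node);

            /* CPUs on the local NUMA node first. */
            for_each_cpu_and(cpu, local, cpu_online_mask)
                    if (i++ == vector)
                            return cpu;

            /* Then the remaining online CPUs, in ID order. */
            for_each_online_cpu(cpu) {
                    if (cpumask_test_cpu(cpu, local))
                            continue;
                    if (i++ == vector)
                            return cpu;
            }

            /* More vectors than CPUs: fall back to the first online CPU. */
            return cpumask_first(cpu_online_mask);
    }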

Introduce an API function, mlx5_comp_vector_get_cpu(), that takes an IRQ
vector index and returns the CPU that the vector is mapped to. It uses the
existing IRQ affinity when one is defined, and falls back to the default
serialized CPU mapping otherwise.
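
As a usage illustration only, mirroring the mlx5e callers changed below, the
returned CPU can drive NUMA-aware allocation; example_alloc_channel() is a
hypothetical helper standing in for code like mlx5e_open_channel():

    /* Usage sketch only: pick the CPU serving completion vector ix and
     * allocate the per-channel state on that CPU's NUMA node, the same
     * pattern the mlx5e hunks below follow.
     */
    static struct mlx5e_channel *example_alloc_channel(struct mlx5_core_dev *mdev,
                                                       int ix)
    {
            int cpu = mlx5_comp_vector_get_cpu(mdev, ix);

            return kvzalloc_node(sizeof(struct mlx5e_channel), GFP_KERNEL,
                                 cpu_to_node(cpu));
    }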

Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Maher Sanalla authored and Saeed Mahameed committed Aug 7, 2023
1 parent ddd2c79 commit f314701
Showing 4 changed files with 20 additions and 8 deletions.
2 changes: 1 addition & 1 deletion drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -128,7 +128,7 @@ static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev,
 
 static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
 {
-        int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, 0));
+        int cpu = mlx5_comp_vector_get_cpu(priv->mdev, 0);
         struct net_device *netdev = priv->netdev;
         struct mlx5e_trap *t;
         int err;
4 changes: 2 additions & 2 deletions drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2445,7 +2445,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                               struct xsk_buff_pool *xsk_pool,
                               struct mlx5e_channel **cp)
 {
-        int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
+        int cpu = mlx5_comp_vector_get_cpu(priv->mdev, ix);
         struct net_device *netdev = priv->netdev;
         struct mlx5e_xsk_param xsk;
         struct mlx5e_channel *c;
@@ -2862,7 +2862,7 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
                 cpumask_clear(priv->scratchpad.cpumask);
 
                 for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
-                        int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq));
+                        int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
 
                         cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
                 }
19 changes: 16 additions & 3 deletions drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -1058,7 +1058,7 @@ unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
 }
 EXPORT_SYMBOL(mlx5_comp_vectors_count);
 
-struct cpumask *
+static struct cpumask *
 mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
 {
         struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -1068,10 +1068,23 @@ mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
         if (eq)
                 return mlx5_irq_get_affinity_mask(eq->core.irq);
 
-        WARN_ON_ONCE(1);
         return NULL;
 }
-EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
+
+int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
+{
+        struct cpumask *mask;
+        int cpu;
+
+        mask = mlx5_comp_irq_get_affinity_mask(dev, vector);
+        if (mask)
+                cpu = cpumask_first(mask);
+        else
+                cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector);
+
+        return cpu;
+}
+EXPORT_SYMBOL(mlx5_comp_vector_get_cpu);
 
 #ifdef CONFIG_RFS_ACCEL
 struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
3 changes: 1 addition & 2 deletions include/linux/mlx5/driver.h
@@ -1109,8 +1109,7 @@ int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
 void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
 
 unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
-struct cpumask *
-mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
+int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector);
 unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
 int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
                            u8 roce_version, u8 roce_l3_type, const u8 *gid,
