net/mlx5: hw counters: Don't maintain a counter count

num_counters is only used for deciding whether to grow the bulk query
buffer, which is done once more counters than a small initial threshold
are present. After that, maintaining num_counters serves no purpose.

This commit replaces that with an actual xarray traversal to count the
counters. This appears expensive at first sight, but is only done when
the number of counters is less than the initial threshold (8) and only
once every sampling interval. Once the number of counters goes above the
threshold, the bulk query buffer is grown to max size and the xarray
traversal is never done again.
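
For readers less familiar with the xarray API, the counting pattern adopted
here looks like this in isolation (a minimal sketch assuming a kernel
context; example_xa and example_count are illustrative names, not part of
the patch):

    #include <linux/xarray.h>

    static DEFINE_XARRAY(example_xa);

    /* Count the present entries on demand instead of maintaining a
     * separate counter field. xa_for_each() takes the RCU read lock
     * internally, so no xa_lock is needed around the traversal.
     */
    static int example_count(void)
    {
            void *entry;
            unsigned long id;
            int n = 0;

            xa_for_each(&example_xa, id, entry)
                    n++;
            return n;
    }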

Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20241001103709.58127-6-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Cosmin Ratiu authored and Jakub Kicinski committed Oct 4, 2024
commit 4a67ebf, 1 parent d95f77f

 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 40 +++++++---------
 1 file changed, 18 insertions(+), 22 deletions(-)
@@ -78,7 +78,6 @@ struct mlx5_fc_stats {
 	unsigned long sampling_interval; /* jiffies */
 	u32 *bulk_query_out;
 	int bulk_query_len;
-	size_t num_counters; /* Also protected by xarray->xa_lock. */
 	bool bulk_query_alloc_failed;
 	unsigned long next_bulk_query_alloc;
 	struct mlx5_fc_pool fc_pool;
@@ -217,21 +216,28 @@ static void mlx5_fc_stats_bulk_query_buf_realloc(struct mlx5_core_dev *dev,
 			       bulk_query_len);
 }
 
+static int mlx5_fc_num_counters(struct mlx5_fc_stats *fc_stats)
+{
+	struct mlx5_fc *counter;
+	int num_counters = 0;
+	unsigned long id;
+
+	xa_for_each(&fc_stats->counters, id, counter)
+		num_counters++;
+	return num_counters;
+}
+
 static void mlx5_fc_stats_work(struct work_struct *work)
 {
 	struct mlx5_fc_stats *fc_stats = container_of(work, struct mlx5_fc_stats,
 						      work.work);
 	struct mlx5_core_dev *dev = fc_stats->fc_pool.dev;
-	int num_counters;
 
 	queue_delayed_work(fc_stats->wq, &fc_stats->work, fc_stats->sampling_interval);
 
-	/* num_counters is only needed for determining whether to increase the buffer. */
-	xa_lock(&fc_stats->counters);
-	num_counters = fc_stats->num_counters;
-	xa_unlock(&fc_stats->counters);
-	if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
-	    num_counters > get_init_bulk_query_len(dev))
+	/* Grow the bulk query buffer to max if not maxed and enough counters are present. */
+	if (unlikely(fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
+		     mlx5_fc_num_counters(fc_stats) > get_init_bulk_query_len(dev)))
 		mlx5_fc_stats_bulk_query_buf_realloc(dev, get_max_bulk_query_len(dev));
 
 	mlx5_fc_stats_query_all_counters(dev);
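
A note on why the new helper takes no lock: per the xarray API,
xa_for_each() is safe to call without holding xa_lock (it takes the RCU
read lock internally and tolerates concurrent stores and erases). The count
it returns can be momentarily stale, but here it only gates a one-time
growth of the bulk query buffer, so an approximate answer is enough.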
@@ -287,15 +293,9 @@ struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging)
 		counter->lastbytes = counter->cache.bytes;
 		counter->lastpackets = counter->cache.packets;
 
-		xa_lock(&fc_stats->counters);
-
-		err = xa_err(__xa_store(&fc_stats->counters, id, counter, GFP_KERNEL));
-		if (err != 0) {
-			xa_unlock(&fc_stats->counters);
+		err = xa_err(xa_store(&fc_stats->counters, id, counter, GFP_KERNEL));
+		if (err != 0)
 			goto err_out_alloc;
-		}
-		fc_stats->num_counters++;
-		xa_unlock(&fc_stats->counters);
 	}
 
 	return counter;
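
The dropped locking here is not a behavior change: per the xarray API,
xa_store() acquires xa_lock itself. A minimal sketch of the equivalence
(helper names are illustrative, not from the driver):

    #include <linux/xarray.h>

    /* What the old code spelled out by hand: lock, store, unlock. */
    static int store_open_coded(struct xarray *xa, unsigned long id, void *p)
    {
            int err;

            xa_lock(xa);
            /* __xa_store() expects xa_lock to be held; it may drop and
             * retake the lock to allocate, so GFP_KERNEL is usable here.
             */
            err = xa_err(__xa_store(xa, id, p, GFP_KERNEL));
            xa_unlock(xa);
            return err;
    }

    /* The wrapper the patch switches to: xa_store() takes xa_lock
     * internally, so this is equivalent once there is no num_counters
     * update that must happen in the same critical section.
     */
    static int store_wrapped(struct xarray *xa, unsigned long id, void *p)
    {
            return xa_err(xa_store(xa, id, p, GFP_KERNEL));
    }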
Expand Down Expand Up @@ -324,12 +324,8 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
if (!counter)
return;

if (counter->aging) {
xa_lock(&fc_stats->counters);
fc_stats->num_counters--;
__xa_erase(&fc_stats->counters, counter->id);
xa_unlock(&fc_stats->counters);
}
if (counter->aging)
xa_erase(&fc_stats->counters, counter->id);
mlx5_fc_release(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);
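
The same reasoning applies to the destroy path: xa_erase() is __xa_erase()
wrapped in xa_lock()/xa_unlock(), so once the num_counters decrement is
gone, nothing else needs to share the critical section and the open-coded
locking can be dropped along with the braces.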