Merge tag 'mlx5-updates-2017-09-03' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2017-09-03

This series from Tariq includes micro data path optimizations for the mlx5e
netdevice driver.

Mainly, Tariq introduces the following changes to the NAPI and RX handling
paths of the driver:
 - RX ring structure reorganizing
 - Trivial code refactoring and optimization
 - NAPI busy-poll while a UMR post is in progress
 - Non-atomic state operations in NAPI context
 - Remove unnecessary fields from fast path structures
 - page-cache micro optimization
 - Rely on NAPI to avoid missing an IRQ for RX/TX shared NAPI contexts
 - Stop NAPI when irq changes affinity
 - Distribute RSS table among all RX rings
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Sep 4, 2017
2 parents ccfdf21 + d4b6c48 commit 18a4ded
Showing 8 changed files with 216 additions and 228 deletions.
49 changes: 20 additions & 29 deletions drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -291,10 +291,11 @@ struct mlx5e_tstamp {
 
 enum {
         MLX5E_RQ_STATE_ENABLED,
-        MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
         MLX5E_RQ_STATE_AM,
 };
 
+#define MLX5E_TEST_BIT(state, nr) (state & BIT(nr))
+
 struct mlx5e_cq {
         /* data path - accessed per cqe */
         struct mlx5_cqwq wq;
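
The new MLX5E_TEST_BIT reads the RQ state word with a plain load instead of test_bit(), which forces a volatile access; this is safe because rq->state is only written from contexts serialized with NAPI. A minimal userspace sketch of the idiom (everything except the macro itself is illustrative, not driver code):

#include <stdio.h>

#define BIT(nr) (1UL << (nr))
#define MLX5E_TEST_BIT(state, nr) (state & BIT(nr))

enum { RQ_STATE_ENABLED, RQ_STATE_AM }; /* stand-ins for the MLX5E_RQ_STATE_* bits */

int main(void)
{
        unsigned long state = 0;

        state |= BIT(RQ_STATE_ENABLED); /* plain RMW: no concurrent writer here */

        /* plain load; unlike test_bit(), the compiler may keep it in a register */
        if (MLX5E_TEST_BIT(state, RQ_STATE_ENABLED))
                printf("RQ enabled\n");
        if (!MLX5E_TEST_BIT(state, RQ_STATE_AM))
                printf("adaptive moderation off\n");
        return 0;
}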
@@ -342,7 +343,6 @@ enum {
 
 struct mlx5e_sq_wqe_info {
         u8 opcode;
-        u8 num_wqebbs;
 };
 
 struct mlx5e_txqsq {
@@ -418,13 +418,8 @@ struct mlx5e_xdpsq {
 struct mlx5e_icosq {
         /* data path */
 
-        /* dirtied @completion */
-        u16 cc;
-
         /* dirtied @xmit */
         u16 pc ____cacheline_aligned_in_smp;
-        u32 dma_fifo_pc;
-        u16 prev_cc;
 
         struct mlx5e_cq cq;
 
@@ -438,7 +433,6 @@ struct mlx5e_icosq {
         void __iomem *uar_map;
         u32 sqn;
         u16 edge;
-        struct device *pdev;
         __be32 mkey_be;
         unsigned long state;
 
@@ -507,7 +501,7 @@ struct mlx5e_rx_am { /* Adaptive Moderation */
  */
 #define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
                           MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
-#define MLX5E_CACHE_SIZE (2 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
+#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
 struct mlx5e_page_cache {
         u32 head;
         u32 tail;
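
The page cache being enlarged here is a plain head/tail ring (struct mlx5e_page_cache above); doubling the multiplier gives the recycler more slack when NAPI returns pages in bursts. A self-contained sketch of such a ring, with simplified types and a size that is not the driver's:

#include <stdbool.h>
#include <stdio.h>

#define CACHE_SIZE 128  /* power of two, as roundup_pow_of_two() guarantees */

struct page_cache {
        unsigned int head;
        unsigned int tail;
        void *pages[CACHE_SIZE];
};

static bool cache_put(struct page_cache *c, void *page)
{
        unsigned int tail_next = (c->tail + 1) & (CACHE_SIZE - 1);

        if (tail_next == c->head)
                return false;   /* full: caller releases the page instead */
        c->pages[c->tail] = page;
        c->tail = tail_next;
        return true;
}

static bool cache_get(struct page_cache *c, void **page)
{
        if (c->head == c->tail)
                return false;   /* empty: caller allocates a fresh page */
        *page = c->pages[c->head];
        c->head = (c->head + 1) & (CACHE_SIZE - 1);
        return true;
}

int main(void)
{
        struct page_cache c = { 0 };
        int dummy;
        void *p;

        cache_put(&c, &dummy);
        if (cache_get(&c, &p))
                printf("recycled %p\n", p);
        return 0;
}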
@@ -516,7 +510,7 @@ struct mlx5e_page_cache {
 
 struct mlx5e_rq;
 typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
-typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq*, struct mlx5e_rx_wqe*, u16);
+typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
 typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
 
 struct mlx5e_rq {
@@ -527,21 +521,26 @@ struct mlx5e_rq {
                 struct {
                         struct mlx5e_wqe_frag_info *frag_info;
                         u32 frag_sz; /* max possible skb frag_sz */
-                        bool page_reuse;
-                        bool xdp_xmit;
+                        union {
+                                bool page_reuse;
+                                bool xdp_xmit;
+                        };
                 } wqe;
                 struct {
                         struct mlx5e_mpw_info *info;
                         void *mtt_no_align;
+                        u16 num_strides;
+                        u8 log_stride_sz;
+                        bool umr_in_progress;
                 } mpwqe;
         };
         struct {
+                u16 headroom;
                 u8 page_order;
-                u32 wqe_sz; /* wqe data buffer size */
                 u8 map_dir; /* dma map direction */
         } buff;
-        __be32 mkey_be;
 
+        struct mlx5e_channel *channel;
         struct device *pdev;
         struct net_device *netdev;
         struct mlx5e_tstamp *tstamp;
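
page_reuse and xdp_xmit are never used on the same RQ (page-reuse is disabled when an XDP program is attached, as the en_main.c hunks below show), so the new anonymous union lets the two flags share storage, mirroring the existing wqe/mpwqe union. A compilable sketch of the pattern, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct rq_wqe_info {
        int frag_sz;
        union {                         /* mutually exclusive on any given RQ */
                bool page_reuse;        /* non-XDP datapath */
                bool xdp_xmit;          /* XDP datapath */
        };
};

int main(void)
{
        struct rq_wqe_info wi = { .frag_sz = 2048, .page_reuse = true };

        /* both names alias the same byte, so the struct carries one flag, not two */
        printf("sizeof = %zu, xdp_xmit = %d\n", sizeof(wi), wi.xdp_xmit);
        return 0;
}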
@@ -550,12 +549,11 @@ struct mlx5e_rq {
         struct mlx5e_page_cache page_cache;
 
         mlx5e_fp_handle_rx_cqe handle_rx_cqe;
-        mlx5e_fp_alloc_wqe alloc_wqe;
+        mlx5e_fp_post_rx_wqes post_wqes;
         mlx5e_fp_dealloc_wqe dealloc_wqe;
 
         unsigned long state;
         int ix;
-        u16 rx_headroom;
 
         struct mlx5e_rx_am am; /* Adaptive Moderation */
 
@@ -565,19 +563,13 @@ struct mlx5e_rq {
 
         /* control */
         struct mlx5_wq_ctrl wq_ctrl;
+        __be32 mkey_be;
         u8 wq_type;
-        u32 mpwqe_stride_sz;
-        u32 mpwqe_num_strides;
         u32 rqn;
-        struct mlx5e_channel *channel;
         struct mlx5_core_dev *mdev;
         struct mlx5_core_mkey umr_mkey;
 } ____cacheline_aligned_in_smp;
 
-enum channel_flags {
-        MLX5E_CHANNEL_NAPI_SCHED = 1,
-};
-
 struct mlx5e_channel {
         /* data path */
         struct mlx5e_rq rq;
@@ -589,7 +581,9 @@ struct mlx5e_channel {
         struct net_device *netdev;
         __be32 mkey_be;
         u8 num_tc;
-        unsigned long flags;
 
+        /* data path - accessed per napi poll */
+        struct irq_desc *irq_desc;
+
         /* control */
         struct mlx5e_priv *priv;
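
The cached irq_desc lets the poll loop detect that irqbalance has moved the channel's IRQ to another CPU and stop polling, so the next interrupt reschedules NAPI on the right core. The consumer of this field lives in en_txrx.c, which this page does not load; a rough sketch of such a check (not verbatim driver code):

static bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
        struct irq_data *idata = irq_desc_get_irq_data(c->irq_desc);
        const struct cpumask *aff = irq_data_get_affinity_mask(idata);

        /* keep polling only while we still run on a CPU this IRQ targets */
        return cpumask_test_cpu(smp_processor_id(), aff);
}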
@@ -850,11 +844,9 @@ void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
-int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
-int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
 void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
-void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq);
 void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
 
 void mlx5e_rx_am(struct mlx5e_rq *rq);
@@ -933,8 +925,7 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
 
-void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
-                                   u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
                                    int num_channels);
 int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 
3 changes: 1 addition & 2 deletions drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -663,8 +663,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
         new_channels.params = priv->channels.params;
         new_channels.params.num_channels = count;
         if (!netif_is_rxfh_configured(priv->netdev))
-                mlx5e_build_default_indir_rqt(priv->mdev,
-                                              new_channels.params.indirection_rqt,
+                mlx5e_build_default_indir_rqt(new_channels.params.indirection_rqt,
                                               MLX5E_INDIR_RQT_SIZE, count);
 
         if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
55 changes: 26 additions & 29 deletions drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -208,6 +208,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
                 s->rx_cache_full += rq_stats->cache_full;
                 s->rx_cache_empty += rq_stats->cache_empty;
                 s->rx_cache_busy += rq_stats->cache_busy;
+                s->rx_cache_waive += rq_stats->cache_waive;
 
                 for (j = 0; j < priv->channels.params.num_tc; j++) {
                         sq_stats = &c->sq[j].stats;
@@ -593,12 +594,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
         }
 
         rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
-        rq->rx_headroom = params->rq_headroom;
+        rq->buff.headroom = params->rq_headroom;
 
         switch (rq->wq_type) {
         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 
-                rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+                rq->post_wqes = mlx5e_post_rx_mpwqes;
                 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
                 rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
@@ -615,11 +616,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                         goto err_rq_wq_destroy;
                 }
 
-                rq->mpwqe_stride_sz = BIT(params->mpwqe_log_stride_sz);
-                rq->mpwqe_num_strides = BIT(params->mpwqe_log_num_strides);
+                rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz;
+                rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides);
 
-                rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
-                byte_count = rq->buff.wqe_sz;
+                byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
 
                 err = mlx5e_create_rq_umr_mkey(mdev, rq);
                 if (err)
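
Storing the log of the stride size lets the per-WQE byte count be computed with a shift instead of a multiply, which is valid because the stride size is a power of two. A standalone check of the equivalence (values are illustrative, not the driver's defaults):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int log_stride_sz = 6;         /* stride size = 64 bytes */
        unsigned int num_strides = 1u << 11;    /* 2048 strides per WQE */

        unsigned int by_mul = num_strides * (1u << log_stride_sz);
        unsigned int by_shift = num_strides << log_stride_sz;

        assert(by_mul == by_shift);
        printf("wqe bytes: %u\n", by_shift);    /* 131072 */
        return 0;
}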
@@ -638,7 +638,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                         err = -ENOMEM;
                         goto err_rq_wq_destroy;
                 }
-                rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+                rq->post_wqes = mlx5e_post_rx_wqes;
                 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
 
 #ifdef CONFIG_MLX5_EN_IPSEC
@@ -654,18 +654,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                         goto err_rq_wq_destroy;
                 }
 
-                rq->buff.wqe_sz = params->lro_en ?
+                byte_count = params->lro_en ?
                                 params->lro_wqe_sz :
                                 MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
 #ifdef CONFIG_MLX5_EN_IPSEC
                 if (MLX5_IPSEC_DEV(mdev))
-                        rq->buff.wqe_sz += MLX5E_METADATA_ETHER_LEN;
+                        byte_count += MLX5E_METADATA_ETHER_LEN;
 #endif
                 rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;
-                byte_count = rq->buff.wqe_sz;
 
                 /* calc the required page order */
-                rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->rx_headroom + byte_count);
+                rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count);
                 npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
                 rq->buff.page_order = order_base_2(npages);
@@ -676,6 +675,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
         for (i = 0; i < wq_sz; i++) {
                 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
 
+                if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+                        u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT;
+
+                        wqe->data.addr = cpu_to_be64(dma_offset);
+                }
+
                 wqe->data.byte_count = cpu_to_be32(byte_count);
                 wqe->data.lkey = rq->mkey_be;
         }
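
byte_count and lkey never change after this point, and for a striding RQ the WQE's DMA address is fully determined by its index, so all three are now written once at RQ creation instead of on every repost. The offset arithmetic in standalone form, with a simplified stand-in for mlx5e_get_wqe_mtt_offset():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGES_PER_WQE 16        /* illustrative, not MLX5_MPWRQ_PAGES_PER_WQE's real value */

static uint64_t wqe_mtt_offset(unsigned int ix)
{
        return (uint64_t)ix * PAGES_PER_WQE;    /* one MTT entry per page */
}

int main(void)
{
        for (unsigned int ix = 0; ix < 4; ix++)
                printf("wqe %u -> dma offset 0x%llx\n", ix,
                       (unsigned long long)(wqe_mtt_offset(ix) << PAGE_SHIFT));
        return 0;
}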
@@ -882,7 +887,8 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
         u16 wqe_ix;
 
         /* UMR WQE (if in progress) is always at wq->head */
-        if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+        if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+            rq->mpwqe.umr_in_progress)
                 mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
 
         while (!mlx5_wq_ll_is_empty(wq)) {
@@ -925,7 +931,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
                 goto err_destroy_rq;
 
         if (params->rx_am_enabled)
-                set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
+                c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
 
         return 0;
 
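Here the RQ is not yet activated, so no other context can observe c->rq.state and the atomic set_bit() can become a plain OR; the atomic form remains where the ring is already live, as in mlx5e_activate_rq() just below. A userspace analogy of the distinction:

#include <stdatomic.h>
#include <stdio.h>

#define BIT(nr) (1UL << (nr))
enum { RQ_STATE_ENABLED, RQ_STATE_AM };

int main(void)
{
        /* setup phase: the ring is still private, a plain RMW suffices */
        unsigned long state = BIT(RQ_STATE_AM);

        /* once published to concurrent contexts, an atomic RMW is required */
        _Atomic unsigned long shared = state;
        atomic_fetch_or(&shared, BIT(RQ_STATE_ENABLED));

        printf("state: 0x%lx\n", atomic_load(&shared));
        return 0;
}
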
@@ -945,7 +951,6 @@ static void mlx5e_activate_rq(struct mlx5e_rq *rq)
 
         set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
         sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
-        sq->db.ico_wqe[pi].num_wqebbs = 1;
         nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
         mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
 }
@@ -1047,7 +1052,6 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
         struct mlx5_core_dev *mdev = c->mdev;
         int err;
 
-        sq->pdev = c->pdev;
         sq->mkey_be = c->mkey_be;
         sq->channel = c;
         sq->uar_map = mdev->mlx5e_res.bfreg.map;
@@ -1757,7 +1761,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
         struct net_device *netdev = priv->netdev;
         int cpu = mlx5e_get_cpu(priv, ix);
         struct mlx5e_channel *c;
+        unsigned int irq;
         int err;
+        int eqn;
 
         c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
         if (!c)
@@ -1774,6 +1780,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
         c->num_tc = params->num_tc;
         c->xdp = !!params->xdp_prog;
 
+        mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+        c->irq_desc = irq_to_desc(irq);
+
         netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
 
         err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
@@ -3693,7 +3702,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 
                 set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
                 /* napi_schedule in case we have missed anything */
-                set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
                 napi_schedule(&c->napi);
 
                 if (old_prog)
@@ -3825,22 +3833,11 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
                2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
 }
 
-void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
-                                   u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
                                    int num_channels)
 {
-        int node = mdev->priv.numa_node;
-        int node_num_of_cores;
         int i;
 
-        if (node == -1)
-                node = first_online_node;
-
-        node_num_of_cores = cpumask_weight(cpumask_of_node(node));
-
-        if (node_num_of_cores)
-                num_channels = min_t(int, num_channels, node_num_of_cores);
-
         for (i = 0; i < len; i++)
                 indirection_rqt[i] = i % num_channels;
 }
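
With the NUMA-based cap gone, the indirection table is filled round-robin across all channels. For example, an 8-entry table with num_channels = 3 comes out as 0 1 2 0 1 2 0 1, which this standalone snippet reproduces:

#include <stdio.h>

int main(void)
{
        unsigned int indirection_rqt[8];
        int num_channels = 3;
        int i;

        for (i = 0; i < 8; i++)
                indirection_rqt[i] = i % num_channels;

        for (i = 0; i < 8; i++)
                printf("%u ", indirection_rqt[i]);      /* 0 1 2 0 1 2 0 1 */
        printf("\n");
        return 0;
}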
@@ -3979,7 +3976,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
         /* RSS */
         params->rss_hfunc = ETH_RSS_HASH_XOR;
         netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
-        mlx5e_build_default_indir_rqt(mdev, params->indirection_rqt,
+        mlx5e_build_default_indir_rqt(params->indirection_rqt,
                                       MLX5E_INDIR_RQT_SIZE, max_channels);
 }
 
(Diff for the remaining 5 changed files not loaded on this page.)
