net/mlx5e: RX, Split WQ objects for different RQ types
Replace the common RQ WQ object with two separate ones for the
different RQ types.
This is in preparation for switching to a cyclic WQ type
in Legacy RQ.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Tariq Toukan authored and Saeed Mahameed committed Jun 1, 2018
1 parent 6c3a823 commit 422d4c4
Showing 3 changed files with 110 additions and 57 deletions.
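For orientation, the sketch below illustrates the pattern this commit applies: each RQ flavor now owns its work queue inside a union in struct mlx5e_rq, and code shared by both RQ types resolves the right WQ by switching on rq->wq_type, as the new mlx5e_rqwq_get_size()/mlx5e_rqwq_get_cur_sz() helpers do in the diff below. The types and field names in the sketch are simplified stand-ins, not the actual mlx5 structures.

#include <stdio.h>

/* Simplified stand-ins; the real driver uses struct mlx5_wq_ll and
 * struct mlx5e_rq from en.h and wq.h.
 */
enum wq_type {
	WQ_TYPE_LINKED_LIST,             /* Legacy RQ */
	WQ_TYPE_LINKED_LIST_STRIDING_RQ, /* Striding (MPWQE) RQ */
};

struct wq_ll {
	unsigned int size;    /* number of WQEs */
	unsigned int cur_sz;  /* currently posted WQEs */
};

/* The RQ no longer embeds a single common WQ; each RQ type carries its
 * own WQ next to its per-type state, so Legacy RQ can later move to a
 * cyclic WQ without disturbing the striding-RQ path.
 */
struct rq {
	enum wq_type wq_type;
	union {
		struct {
			struct wq_ll wq;
			/* ... legacy-RQ (wqe) fields ... */
		} wqe;
		struct {
			struct wq_ll wq;
			/* ... striding-RQ (mpwqe) fields ... */
		} mpwqe;
	};
};

/* Shared code dispatches on wq_type to reach the per-type WQ. */
static unsigned int rqwq_get_size(const struct rq *rq)
{
	switch (rq->wq_type) {
	case WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return rq->mpwqe.wq.size;
	default: /* WQ_TYPE_LINKED_LIST */
		return rq->wqe.wq.size;
	}
}

int main(void)
{
	struct rq rq = { .wq_type = WQ_TYPE_LINKED_LIST };

	rq.wqe.wq.size = 1024;
	printf("legacy RQ WQ size: %u\n", rqwq_get_size(&rq));
	return 0;
}

Note that in the actual patch both branches of mlx5e_rqwq_get_size() still call mlx5_wq_ll_get_size(), since Legacy RQ keeps a linked-list WQ until the follow-up conversion to a cyclic WQ.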
4 changes: 2 additions & 2 deletions drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -498,17 +498,17 @@ enum mlx5e_rq_flag {

struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;

union {
struct {
struct mlx5_wq_ll wq;
struct mlx5e_wqe_frag_info *frag_info;
u32 frag_sz; /* max possible skb frag_sz */
union {
bool page_reuse;
};
} wqe;
struct {
struct mlx5_wq_ll wq;
struct mlx5e_umr_wqe umr_wqe;
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
128 changes: 88 additions & 40 deletions drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -319,10 +319,30 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}

static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
default:
return mlx5_wq_ll_get_size(&rq->wqe.wq);
}
}

static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
return rq->mpwqe.wq.cur_sz;
default:
return rq->wqe.wq.cur_sz;
}
}

static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
struct mlx5e_channel *c)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
GFP_KERNEL, cpu_to_node(c->cpu));
@@ -370,7 +390,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,

static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));
u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));

return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}
@@ -397,15 +417,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,

rqp->wq.db_numa_node = cpu_to_node(c->cpu);

err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
&rq->wq_ctrl);
if (err)
return err;

rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

wq_sz = mlx5_wq_ll_get_size(&rq->wq);

rq->wq_type = params->rq_wq_type;
rq->pdev = c->pdev;
rq->netdev = c->netdev;
@@ -434,8 +445,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,

switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
&rq->wq_ctrl);
if (err)
return err;

rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];

wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);

pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params);

rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

@@ -472,6 +492,15 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_destroy_umr_mkey;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
&rq->wq_ctrl);
if (err)
return err;

rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];

wq_sz = mlx5_wq_ll_get_size(&rq->wqe.wq);

rq->wqe.frag_info =
kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
GFP_KERNEL, cpu_to_node(c->cpu));
@@ -538,16 +567,21 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;

for (i = 0; i < wq_sz; i++) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
struct mlx5e_rx_wqe *wqe =
mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);

wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom);
}
wqe->data.byte_count = cpu_to_be32(byte_count);
wqe->data.lkey = rq->mkey_be;
} else {
struct mlx5e_rx_wqe *wqe =
mlx5_wq_ll_get_wqe(&rq->wqe.wq, i);

wqe->data.byte_count = cpu_to_be32(byte_count);
wqe->data.lkey = rq->mkey_be;
wqe->data.byte_count = cpu_to_be32(byte_count);
wqe->data.lkey = rq->mkey_be;
}
}

INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
@@ -744,51 +778,65 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
struct mlx5e_channel *c = rq->channel;

struct mlx5_wq_ll *wq = &rq->wq;
u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));
u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));

do {
if (wq->cur_sz >= min_wqes)
if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
return 0;

msleep(20);
} while (time_before(jiffies, exp_time));

netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
c->ix, rq->rqn, wq->cur_sz, min_wqes);
c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);

return -ETIMEDOUT;
}

static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
struct mlx5e_rx_wqe *wqe;
__be16 wqe_ix_be;
u16 wqe_ix;

/* UMR WQE (if in progress) is always at wq->head */
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
rq->mpwqe.umr_in_progress)
mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);

while (!mlx5_wq_ll_is_empty(wq)) {
wqe_ix_be = *wq->tail_next;
wqe_ix = be16_to_cpu(wqe_ix_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
rq->dealloc_wqe(rq, wqe_ix);
mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;

if (rq->mpwqe.umr_in_progress)
mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);

while (!mlx5_wq_ll_is_empty(wq)) {
struct mlx5e_rx_wqe *wqe;

wqe_ix_be = *wq->tail_next;
wqe_ix = be16_to_cpu(wqe_ix_be);
wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix);
rq->dealloc_wqe(rq, wqe_ix);
mlx5_wq_ll_pop(wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
} else {
struct mlx5_wq_ll *wq = &rq->wqe.wq;

while (!mlx5_wq_ll_is_empty(wq)) {
struct mlx5e_rx_wqe *wqe;

wqe_ix_be = *wq->tail_next;
wqe_ix = be16_to_cpu(wqe_ix_be);
wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix);
rq->dealloc_wqe(rq, wqe_ix);
mlx5_wq_ll_pop(wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}

if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
/* Clean outstanding pages on handled WQEs that decided to do page-reuse,
* but yet to be re-posted.
*/
int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
if (rq->wqe.page_reuse) {
int wq_sz = mlx5_wq_ll_get_size(wq);

for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
rq->dealloc_wqe(rq, wqe_ix);
for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
rq->dealloc_wqe(rq, wqe_ix);
}
}
}

@@ -2809,7 +2857,7 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,

param->wq.db_numa_node = param->wq.buf_numa_node;

err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
&rq->wq_ctrl);
if (err)
return err;
35 changes: 20 additions & 15 deletions drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -113,7 +113,7 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
mpwrq_get_cqe_consumed_strides(&cq->title);
else
cq->decmprs_wqe_counter =
mlx5_wq_ll_ctr2ix(&rq->wq, cq->decmprs_wqe_counter + 1);
mlx5_wq_ll_ctr2ix(&rq->wqe.wq, cq->decmprs_wqe_counter + 1);
}

static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
@@ -369,7 +369,7 @@ void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)

static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);

rq->mpwqe.umr_in_progress = false;
@@ -470,7 +470,7 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)

bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
struct mlx5_wq_ll *wq = &rq->wqe.wq;
int err;

if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
@@ -546,7 +546,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)

bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;

if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return false;
@@ -987,6 +987,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,

void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_ll *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
struct mlx5e_rx_wqe *wqe;
__be16 wqe_counter_be;
@@ -996,7 +997,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)

wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
wqe = mlx5_wq_ll_get_wqe(wq, wqe_counter);
wi = &rq->wqe.frag_info[wqe_counter];
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

@@ -1018,7 +1019,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)

mlx5e_free_rx_wqe_reuse(rq, wi);
wq_ll_pop:
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
mlx5_wq_ll_pop(wq, wqe_counter_be,
&wqe->next.next_wqe_index);
}

@@ -1029,6 +1030,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_wq_ll *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
struct mlx5e_rx_wqe *wqe;
struct sk_buff *skb;
@@ -1038,7 +1040,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)

wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
wqe = mlx5_wq_ll_get_wqe(wq, wqe_counter);
wi = &rq->wqe.frag_info[wqe_counter];
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

@@ -1063,7 +1065,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)

mlx5e_free_rx_wqe_reuse(rq, wi);
wq_ll_pop:
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
mlx5_wq_ll_pop(wq, wqe_counter_be,
&wqe->next.next_wqe_index);
}
#endif
@@ -1164,6 +1166,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
u32 page_idx = wqe_offset >> PAGE_SHIFT;
struct mlx5e_rx_wqe *wqe;
struct mlx5_wq_ll *wq;
struct sk_buff *skb;
u16 cqe_bcnt;

@@ -1193,9 +1196,10 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
return;

wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
wq = &rq->mpwqe.wq;
wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
mlx5e_free_rx_mpwqe(rq, wi);
mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
@@ -1399,6 +1403,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,

void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_ll *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
struct mlx5e_rx_wqe *wqe;
__be16 wqe_counter_be;
@@ -1408,7 +1413,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)

wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
wqe = mlx5_wq_ll_get_wqe(wq, wqe_counter);
wi = &rq->wqe.frag_info[wqe_counter];
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

@@ -1425,7 +1430,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)

wq_free_wqe:
mlx5e_free_rx_wqe_reuse(rq, wi);
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
mlx5_wq_ll_pop(wq, wqe_counter_be,
&wqe->next.next_wqe_index);
}

@@ -1435,6 +1440,7 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)

void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_ll *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
struct mlx5e_rx_wqe *wqe;
__be16 wqe_counter_be;
@@ -1444,7 +1450,7 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)

wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
wqe = mlx5_wq_ll_get_wqe(wq, wqe_counter);
wi = &rq->wqe.frag_info[wqe_counter];
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

@@ -1465,8 +1471,7 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)

mlx5e_free_rx_wqe_reuse(rq, wi);
wq_ll_pop:
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
&wqe->next.next_wqe_index);
mlx5_wq_ll_pop(wq, wqe_counter_be, &wqe->next.next_wqe_index);
}

#endif /* CONFIG_MLX5_EN_IPSEC */
