qede: Update receive statistic once per NAPI

Currently, each time an ingress packet is passed to the networking stack,
the driver increments a per-queue SW statistic.
As we want to add fields to the first cache line of the Rx-queue struct,
change the flow so that this statistic is updated only once per NAPI run.
We will later move the statistic to a different cache line.
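For context, here is a minimal standalone sketch of the once-per-NAPI batching pattern described above. It is illustrative only: the rx_queue_stats structure and the process_one_cqe() and rx_poll() names are hypothetical, not the qede driver's actual code.

    /*
     * Illustrative sketch (hypothetical names): instead of bumping the
     * shared per-queue counter for every packet, the poll loop counts
     * locally and writes the shared field once per NAPI run, so the hot
     * first cache line of the Rx-queue struct sees no per-packet stores.
     */
    struct rx_queue_stats {
            unsigned long rcv_pkts;         /* per-queue SW statistic */
    };

    /* Returns 1 if a packet was handed to the stack, 0 otherwise. */
    static int process_one_cqe(void)
    {
            /* ...receive work elided in this sketch... */
            return 1;
    }

    static int rx_poll(struct rx_queue_stats *rxq, int budget)
    {
            int work_done = 0, rcv_pkts = 0;

            while (work_done < budget) {
                    rcv_pkts += process_one_cqe();  /* local accumulation */
                    work_done++;
            }

            rxq->rcv_pkts += rcv_pkts;              /* single update per NAPI run */
            return work_done;
    }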

Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Mintz, Yuval authored and David S. Miller committed Apr 7, 2017
1 parent: 1ca2212 · commit: 10a0176
Showing 1 changed file with 11 additions and 10 deletions.
drivers/net/ethernet/qlogic/qede/qede_fp.c (11 additions, 10 deletions)

@@ -624,7 +624,6 @@ static inline void qede_skb_receive(struct qede_dev *edev,
                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

         napi_gro_receive(&fp->napi, skb);
-        rxq->rcv_pkts++;
 }

 static void qede_set_gro_params(struct qede_dev *edev,
@@ -884,9 +883,9 @@ static inline void qede_tpa_cont(struct qede_dev *edev,
                        "Strange - TPA cont with more than a single len_list entry\n");
 }

-static void qede_tpa_end(struct qede_dev *edev,
-                         struct qede_fastpath *fp,
-                         struct eth_fast_path_rx_tpa_end_cqe *cqe)
+static int qede_tpa_end(struct qede_dev *edev,
+                        struct qede_fastpath *fp,
+                        struct eth_fast_path_rx_tpa_end_cqe *cqe)
 {
         struct qede_rx_queue *rxq = fp->rxq;
         struct qede_agg_info *tpa_info;
@@ -934,11 +933,12 @@ static void qede_tpa_end(struct qede_dev *edev,

         tpa_info->state = QEDE_AGG_STATE_NONE;

-        return;
+        return 1;
 err:
         tpa_info->state = QEDE_AGG_STATE_NONE;
         dev_kfree_skb_any(tpa_info->skb);
         tpa_info->skb = NULL;
+        return 0;
 }

 static u8 qede_check_notunn_csum(u16 flag)
@@ -1178,8 +1178,7 @@ static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
                 qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
                 return 0;
         case ETH_RX_CQE_TYPE_TPA_END:
-                qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
-                return 1;
+                return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
         default:
                 return 0;
         }
@@ -1229,7 +1228,7 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
         /* Run eBPF program if one is attached */
         if (xdp_prog)
                 if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
-                        return 1;
+                        return 0;

         /* If this is an error packet then drop it */
         flags = cqe->fast_path_regular.pars_flags.flags;
@@ -1290,8 +1289,8 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 {
         struct qede_rx_queue *rxq = fp->rxq;
         struct qede_dev *edev = fp->edev;
+        int work_done = 0, rcv_pkts = 0;
         u16 hw_comp_cons, sw_comp_cons;
-        int work_done = 0;

         hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
         sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -1305,12 +1304,14 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)

         /* Loop to complete all indicated BDs */
         while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
-                qede_rx_process_cqe(edev, fp, rxq);
+                rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
                 qed_chain_recycle_consumed(&rxq->rx_comp_ring);
                 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
                 work_done++;
         }

+        rxq->rcv_pkts += rcv_pkts;
+
         /* Allocate replacement buffers */
         while (rxq->num_rx_buffers - rxq->filled_buffers)
                 if (qede_alloc_rx_buffer(rxq, false))
