Skip to content

Commit

Permalink
be2net: fix RX fragment posting for jumbo frames
Browse files Browse the repository at this point in the history
In the RX path, the driver currently consumes up to 64 (budget) packets in
one NAPI sweep. When the size of the packet received is larger than a
fragment size (2K), more than one fragment is consumed for each packet.
As the driver currently posts a max of 64 fragments, all the consumed
fragments may not be replenished. This can cause avoidable drops in RX path.
This patch fixes this by posting max(consumed_frags, 64) fragments. This is
done only when there are at least 64 free slots in the RXQ.

Signed-off-by: Ajit Khaparde <ajit.khaparde@emulex.com>
Signed-off-by: Kalesh AP <kalesh.purayil@emulex.com>
Signed-off-by: Sathya Perla <sathya.perla@emulex.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
Ajit Khaparde authored and David S. Miller committed Sep 13, 2014
1 parent 242eb47 commit c30d726
Showing 1 changed file with 15 additions and 7 deletions.
22 changes: 15 additions & 7 deletions drivers/net/ethernet/emulex/benet/be_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -1852,7 +1852,7 @@ static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
* Allocate a page, split it to fragments of size rx_frag_size and post as
* receive buffers to BE
*/
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Expand All @@ -1861,10 +1861,10 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
struct device *dev = &adapter->pdev->dev;
struct be_eth_rx_d *rxd;
u64 page_dmaaddr = 0, frag_dmaaddr;
u32 posted, page_offset = 0;
u32 posted, page_offset = 0, notify = 0;

page_info = &rxo->page_info_tbl[rxq->head];
for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
if (!pagep) {
pagep = be_alloc_pages(adapter->big_page_size, gfp);
if (unlikely(!pagep)) {
Expand Down Expand Up @@ -1920,7 +1920,11 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
atomic_add(posted, &rxq->used);
if (rxo->rx_post_starved)
rxo->rx_post_starved = false;
be_rxq_notify(adapter, rxq->id, posted);
do {
notify = min(256u, posted);
be_rxq_notify(adapter, rxq->id, notify);
posted -= notify;
} while (posted);
} else if (atomic_read(&rxq->used) == 0) {
/* Let be_worker replenish when memory is available */
rxo->rx_post_starved = true;
Expand Down Expand Up @@ -2371,6 +2375,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
struct be_queue_info *rx_cq = &rxo->cq;
struct be_rx_compl_info *rxcp;
u32 work_done;
u32 frags_consumed = 0;

for (work_done = 0; work_done < budget; work_done++) {
rxcp = be_rx_compl_get(rxo);
Expand Down Expand Up @@ -2403,6 +2408,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
frags_consumed += rxcp->num_rcvd;
be_rx_stats_update(rxo, rxcp);
}

Expand All @@ -2414,7 +2420,9 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
*/
if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
!rxo->rx_post_starved)
be_post_rx_frags(rxo, GFP_ATOMIC);
be_post_rx_frags(rxo, GFP_ATOMIC,
max_t(u32, MAX_RX_POST,
frags_consumed));
}

return work_done;
Expand Down Expand Up @@ -2896,7 +2904,7 @@ static int be_rx_qs_create(struct be_adapter *adapter)

/* First time posting */
for_all_rx_queues(adapter, rxo, i)
be_post_rx_frags(rxo, GFP_KERNEL);
be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
return 0;
}

Expand Down Expand Up @@ -4778,7 +4786,7 @@ static void be_worker(struct work_struct *work)
* allocation failures.
*/
if (rxo->rx_post_starved)
be_post_rx_frags(rxo, GFP_KERNEL);
be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
}

be_eqd_update(adapter);
Expand Down

0 comments on commit c30d726

Please sign in to comment.