net: core: use listified Rx for GRO_NORMAL in napi_gro_receive()
Commit 323ebb6 ("net: use listified RX for handling GRO_NORMAL
skbs") introduced listified skb processing for the users of
napi_gro_frags().
The same technique can be used in the far more common napi_gro_receive()
to speed up non-merged (GRO_NORMAL) skbs for a wide range of drivers,
including gro_cells and mac80211 users.
This slightly changes the return value in cases where the skb is
dropped by the core stack, but it appears to have no impact on the
affected drivers' functionality.
gro_normal_batch is left untouched, as its optimal value is highly
system-specific and may be tuned manually for best performance.

Signed-off-by: Alexander Lobakin <alobakin@dlink.ru>
Acked-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
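
As background rather than part of the change: the sketch below (hypothetical
driver code, with rx_ring_next_skb() standing in for a real descriptor-ring
walk) shows where this commit takes effect. After it, a GRO_NORMAL verdict
inside napi_gro_receive() queues the skb on napi->rx_list via gro_normal_one(),
and the pending batch is passed up in one go by gro_normal_list(), which the
earlier commit 323ebb6 wired into napi_complete_done().

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget && (skb = rx_ring_next_skb(napi))) {
		/* With this commit, a GRO_NORMAL skb is batched on
		 * napi->rx_list instead of being delivered one at a
		 * time through netif_receive_skb_internal().
		 */
		napi_gro_receive(napi, skb);
		work_done++;
	}

	/* napi_complete_done() flushes any partial batch that is
	 * still sitting on napi->rx_list.
	 */
	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}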
Alexander Lobakin authored and David S. Miller committed Oct 16, 2019
1 parent 77ffe33 commit 6570bc7
Showing 1 changed file with 25 additions and 24 deletions.

net/core/dev.c
@@ -5884,19 +5884,40 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
+static void gro_normal_list(struct napi_struct *napi)
+{
+	if (!napi->rx_count)
+		return;
+	netif_receive_skb_list_internal(&napi->rx_list);
+	INIT_LIST_HEAD(&napi->rx_list);
+	napi->rx_count = 0;
+}
+
+/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
+ * pass the whole batch up to the stack.
+ */
+static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
+{
+	list_add_tail(&skb->list, &napi->rx_list);
+	if (++napi->rx_count >= gro_normal_batch)
+		gro_normal_list(napi);
+}
+
 static void napi_skb_free_stolen_head(struct sk_buff *skb)
 {
 	skb_dst_drop(skb);
 	skb_ext_put(skb);
 	kmem_cache_free(skbuff_head_cache, skb);
 }
 
-static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
+static gro_result_t napi_skb_finish(struct napi_struct *napi,
+				    struct sk_buff *skb,
+				    gro_result_t ret)
 {
 	switch (ret) {
 	case GRO_NORMAL:
-		if (netif_receive_skb_internal(skb))
-			ret = GRO_DROP;
+		gro_normal_one(napi, skb);
 		break;
 
 	case GRO_DROP:
@@ -5928,7 +5949,7 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 
 	skb_gro_reset_offset(skb);
 
-	ret = napi_skb_finish(dev_gro_receive(napi, skb), skb);
+	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
 	trace_napi_gro_receive_exit(ret);
 
 	return ret;
@@ -5974,26 +5995,6 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_get_frags);
 
-/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
-static void gro_normal_list(struct napi_struct *napi)
-{
-	if (!napi->rx_count)
-		return;
-	netif_receive_skb_list_internal(&napi->rx_list);
-	INIT_LIST_HEAD(&napi->rx_list);
-	napi->rx_count = 0;
-}
-
-/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
- * pass the whole batch up to the stack.
- */
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
-{
-	list_add_tail(&skb->list, &napi->rx_list);
-	if (++napi->rx_count >= gro_normal_batch)
-		gro_normal_list(napi);
-}
-
 static gro_result_t napi_frags_finish(struct napi_struct *napi,
 				      struct sk_buff *skb,
 				      gro_result_t ret)
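
To make the batching threshold concrete, here is a minimal user-space model
(an illustration, not kernel code) of the gro_normal_one()/gro_normal_list()
helpers this commit moves up in net/core/dev.c; the threshold mirrors the
net.core.gro_normal_batch sysctl that commit 323ebb6 introduced (default 8),
and list delivery is reduced to a printf().

#include <stdio.h>

#define GRO_NORMAL_BATCH 8	/* models the net.core.gro_normal_batch default */

static int rx_count;		/* models napi->rx_count */

/* Plays the role of gro_normal_list(): deliver the whole batch at once. */
static void flush_batch(void)
{
	if (!rx_count)
		return;
	printf("delivering %d skbs as one list\n", rx_count);
	rx_count = 0;
}

/* Plays the role of gro_normal_one(): queue one skb, flush on threshold. */
static void queue_one(void)
{
	if (++rx_count >= GRO_NORMAL_BATCH)
		flush_batch();
}

int main(void)
{
	/* 20 GRO_NORMAL skbs in one poll: two full batches of 8 ... */
	for (int i = 0; i < 20; i++)
		queue_one();
	/* ... and the final partial flush that napi_complete_done()
	 * performs when the poll ends.
	 */
	flush_batch();
	return 0;
}

Output is two lines reporting 8 skbs each and one reporting 4, matching how a
20-packet poll would be split with the default batch size.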