Skip to content

Commit

Permalink
net: Move napi polling code out of net_rx_action
Browse files Browse the repository at this point in the history
This patch creates a new function napi_poll and moves the napi
polling code from net_rx_action into it.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
Herbert Xu authored and David S. Miller committed Dec 24, 2014
1 parent 0d16449 commit 726ce70
Showing 1 changed file with 54 additions and 44 deletions.
98 changes: 54 additions & 44 deletions net/core/dev.c
Original file line number Diff line number Diff line change
Expand Up @@ -4557,22 +4557,73 @@ void netif_napi_del(struct napi_struct *napi)
}
EXPORT_SYMBOL(netif_napi_del);

/* Run a single poll cycle for NAPI context @n.
 *
 * The context is removed from whatever poll list it is on before the
 * ->poll() callback runs; if it exhausts its full weight and is not
 * being disabled, it is re-queued on @repoll for another pass.
 *
 * Returns the number of packets processed by ->poll() (0 if the
 * callback was not invoked at all).
 */
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *lock_token;
	int work_done = 0;
	int quota;

	list_del_init(&n->poll_list);

	lock_token = netpoll_poll_lock(n);
	quota = n->weight;

	/* Guard against a race with netpoll's poll_napi(): only the
	 * entity that holds the lock and still observes NAPI_STATE_SCHED
	 * set is allowed to invoke ->poll(). This keeps us from calling
	 * ->poll() on a context that is not actually scheduled.
	 */
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work_done = n->poll(n, quota);
		trace_napi_poll(n);
	}

	/* A driver returning more than its weight is a bug. */
	WARN_ON_ONCE(work_done > quota);

	if (likely(work_done < quota)) {
		/* Budget not exhausted: the driver completed on its own. */
		netpoll_poll_unlock(lock_token);
		return work_done;
	}

	/* The entire weight was consumed. Drivers must not touch the
	 * NAPI state in that case, so this code still owns the instance
	 * and is free to move it between lists.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
	} else {
		if (n->gro_list) {
			/* Flush packets that have sat too long in GRO;
			 * when HZ < 1000, flush everything.
			 */
			napi_gro_flush(n, HZ >= 1000);
		}
		list_add_tail(&n->poll_list, repoll);
	}

	netpoll_poll_unlock(lock_token);

	return work_done;
}

static void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
LIST_HEAD(list);
LIST_HEAD(repoll);
void *have;

local_irq_disable();
list_splice_init(&sd->poll_list, &list);
local_irq_enable();

while (!list_empty(&list)) {
struct napi_struct *n;
int work, weight;

/* If softirq window is exhausted then punt.
* Allow this to run for 2 jiffies since which will allow
Expand All @@ -4583,48 +4634,7 @@ static void net_rx_action(struct softirq_action *h)


n = list_first_entry(&list, struct napi_struct, poll_list);
list_del_init(&n->poll_list);

have = netpoll_poll_lock(n);

weight = n->weight;

/* This NAPI_STATE_SCHED test is for avoiding a race
* with netpoll's poll_napi(). Only the entity which
* obtains the lock and sees NAPI_STATE_SCHED set will
* actually make the ->poll() call. Therefore we avoid
* accidentally calling ->poll() when NAPI is not scheduled.
*/
work = 0;
if (test_bit(NAPI_STATE_SCHED, &n->state)) {
work = n->poll(n, weight);
trace_napi_poll(n);
}

WARN_ON_ONCE(work > weight);

budget -= work;

/* Drivers must not modify the NAPI state if they
* consume the entire weight. In such cases this code
* still "owns" the NAPI instance and therefore can
* move the instance around on the list at-will.
*/
if (unlikely(work == weight)) {
if (unlikely(napi_disable_pending(n))) {
napi_complete(n);
} else {
if (n->gro_list) {
/* flush too old packets
* If HZ < 1000, flush all packets.
*/
napi_gro_flush(n, HZ >= 1000);
}
list_add_tail(&n->poll_list, &repoll);
}
}

netpoll_poll_unlock(have);
budget -= napi_poll(n, &repoll);
}

if (!sd_has_rps_ipi_waiting(sd) &&
Expand Down

0 comments on commit 726ce70

Please sign in to comment.