net: introduce napi_schedule_irqoff()
napi_schedule() can be called from any context and has to mask hard
irqs.

Add a variant that can only be called from hard interrupt handlers
or when irqs are already masked.

Many NIC drivers can use it from their hard IRQ handler instead of
the generic variant.
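
To illustrate the intended call site, here is a minimal, hypothetical driver sketch (the mydrv_* names and struct mydrv_rx_ring are invented for the example and are not part of this commit). A hard IRQ handler already runs with hard irqs masked, so it can call the new helper directly:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct mydrv_rx_ring {			/* hypothetical per-ring driver state */
	struct napi_struct napi;
};

static irqreturn_t mydrv_msix_rx(int irq, void *data)
{
	struct mydrv_rx_ring *ring = data;

	/* Hard irqs are masked in a hard IRQ handler, so the
	 * local_irq_save()/local_irq_restore() done on the generic
	 * napi_schedule() path is unnecessary here.
	 */
	napi_schedule_irqoff(&ring->napi);	/* was: napi_schedule(&ring->napi) */

	return IRQ_HANDLED;
}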

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eric Dumazet authored and David S. Miller committed Oct 29, 2014
1 parent c6be2a1 commit bc9ad16
Showing 2 changed files with 27 additions and 1 deletion.
include/linux/netdevice.h (13 additions, 0 deletions)
@@ -386,6 +386,7 @@ typedef enum rx_handler_result rx_handler_result_t;
 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
 
 void __napi_schedule(struct napi_struct *n);
+void __napi_schedule_irqoff(struct napi_struct *n);
 
 static inline bool napi_disable_pending(struct napi_struct *n)
 {
@@ -420,6 +421,18 @@ static inline void napi_schedule(struct napi_struct *n)
 		__napi_schedule(n);
 }
 
+/**
+ * napi_schedule_irqoff - schedule NAPI poll
+ * @n: napi context
+ *
+ * Variant of napi_schedule(), assuming hard irqs are masked.
+ */
+static inline void napi_schedule_irqoff(struct napi_struct *n)
+{
+	if (napi_schedule_prep(n))
+		__napi_schedule_irqoff(n);
+}
+
 /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
 static inline bool napi_reschedule(struct napi_struct *napi)
 {
net/core/dev.c (14 additions, 1 deletion)
@@ -4372,7 +4372,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
  * __napi_schedule - schedule for receive
  * @n: entry to schedule
  *
- * The entry's receive function will be scheduled to run
+ * The entry's receive function will be scheduled to run.
+ * Consider using __napi_schedule_irqoff() if hard irqs are masked.
  */
 void __napi_schedule(struct napi_struct *n)
 {
@@ -4384,6 +4385,18 @@ void __napi_schedule(struct napi_struct *n)
 }
 EXPORT_SYMBOL(__napi_schedule);
 
+/**
+ * __napi_schedule_irqoff - schedule for receive
+ * @n: entry to schedule
+ *
+ * Variant of __napi_schedule() assuming hard irqs are masked
+ */
+void __napi_schedule_irqoff(struct napi_struct *n)
+{
+	____napi_schedule(this_cpu_ptr(&softnet_data), n);
+}
+EXPORT_SYMBOL(__napi_schedule_irqoff);
+
 void __napi_complete(struct napi_struct *n)
 {
 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
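
For comparison with the new helper above, the generic __napi_schedule() in the same file looks roughly like this at this point in the tree (reconstructed for illustration, not part of the diff). It masks hard irqs around the enqueue on every call, which is exactly the work the _irqoff variant omits when the caller already runs with hard irqs disabled:

void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);		/* mask hard irqs; safe from any context */
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);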
