net: Add queue state xoff flag for stack
Create separate queue state flags so that either the stack or drivers
can turn on XOFF.  Add a set of functions used in the stack to determine
whether a queue is really stopped (either by the stack or the driver).

Signed-off-by: Tom Herbert <therbert@google.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Tom Herbert authored and David S. Miller committed Nov 29, 2011
1 parent 75957ba commit 7346649
Showing 7 changed files with 46 additions and 25 deletions.
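
For orientation before the per-file diff: the netif_tx_* helpers that drivers already use keep their semantics and now touch only __QUEUE_STATE_DRV_XOFF. Below is a kernel-style sketch of a hypothetical driver; my_ring_full(), my_start_xmit() and my_tx_complete() are invented for illustration and are not part of this patch.

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_queue *txq =
                netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

        if (my_ring_full(dev)) {
                /* Sets __QUEUE_STATE_DRV_XOFF only; the stack's XOFF bit
                 * is untouched, so the two sides never clobber each other. */
                netif_tx_stop_queue(txq);
                return NETDEV_TX_BUSY;
        }
        /* ... post skb to hardware ... */
        return NETDEV_TX_OK;
}

static void my_tx_complete(struct net_device *dev, struct netdev_queue *txq)
{
        /* Clears __QUEUE_STATE_DRV_XOFF and reschedules the qdisc; if the
         * stack's XOFF bit is still set, the xmit paths below still see
         * the queue as stopped. */
        if (netif_tx_queue_stopped(txq) && !my_ring_full(dev))
                netif_tx_wake_queue(txq);
}

Because the driver only ever sets and clears its own bit, a stack-initiated stop via __QUEUE_STATE_STACK_XOFF survives TX completions.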
41 changes: 30 additions & 11 deletions include/linux/netdevice.h
@@ -517,11 +517,23 @@ static inline void napi_synchronize(const struct napi_struct *n)
 #endif
 
 enum netdev_queue_state_t {
-        __QUEUE_STATE_XOFF,
+        __QUEUE_STATE_DRV_XOFF,
+        __QUEUE_STATE_STACK_XOFF,
         __QUEUE_STATE_FROZEN,
-#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \
-                                    (1 << __QUEUE_STATE_FROZEN))
+#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
+                              (1 << __QUEUE_STATE_STACK_XOFF))
+#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
+                                        (1 << __QUEUE_STATE_FROZEN))
 };
+/*
+ * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
+ * netif_tx_* functions below are used to manipulate this flag.  The
+ * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
+ * queue independently.  The netif_xmit_*stopped functions below are called
+ * to check if the queue has been stopped by the driver or the stack (either
+ * of the XOFF bits is set in the state).  Drivers should not need to call
+ * the netif_xmit_*stopped functions; they should only use netif_tx_*.
+ */
 
 struct netdev_queue {
 /*
@@ -1718,7 +1730,7 @@ extern void __netif_schedule(struct Qdisc *q);
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
-        if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+        if (!(txq->state & QUEUE_STATE_ANY_XOFF))
                 __netif_schedule(txq->qdisc);
 }
 
@@ -1732,7 +1744,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev)
 
 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
 {
-        clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+        clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
@@ -1764,7 +1776,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
                 return;
         }
 #endif
-        if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+        if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
                 __netif_schedule(dev_queue->qdisc);
 }
 
@@ -1796,7 +1808,7 @@ static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
                 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
                 return;
         }
-        set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+        set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
@@ -1823,7 +1835,7 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev)
 
 static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {
-        return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+        return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
@@ -1837,9 +1849,16 @@ static inline int netif_queue_stopped(const struct net_device *dev)
         return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
-static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
+static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue)
 {
-        return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
+        return dev_queue->state & QUEUE_STATE_ANY_XOFF;
+}
+
+static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
+{
+        return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
 }
 
 /**
@@ -1926,7 +1945,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
         if (netpoll_trap())
                 return;
 #endif
-        if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+        if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
                 __netif_schedule(txq->qdisc);
 }
 
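The helpers added above are plain bit tests on txq->state. A standalone C model of the masks (toy_queue and main() are invented; only the bit and mask names mirror the patch) shows that a stack-initiated stop is invisible to netif_tx_queue_stopped() yet caught by the new netif_xmit_stopped():

#include <stdio.h>

enum { __QUEUE_STATE_DRV_XOFF, __QUEUE_STATE_STACK_XOFF, __QUEUE_STATE_FROZEN };

#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
                              (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
                                        (1 << __QUEUE_STATE_FROZEN))

struct toy_queue { unsigned long state; };

int main(void)
{
        struct toy_queue q = { 0 };

        q.state |= 1UL << __QUEUE_STATE_STACK_XOFF;  /* stack stops the queue */

        /* Driver view: tests only the DRV bit, so still "running". */
        printf("tx_queue_stopped:  %d\n",
               !!(q.state & (1UL << __QUEUE_STATE_DRV_XOFF)));
        /* Stack view: any XOFF bit counts, so "stopped". */
        printf("xmit_stopped:      %d\n", !!(q.state & QUEUE_STATE_ANY_XOFF));
        printf("frozen_or_stopped: %d\n",
               !!(q.state & QUEUE_STATE_ANY_XOFF_OR_FROZEN));
        return 0;
}

Compiled with gcc, this prints 0 / 1 / 1 — exactly the split that the comment block in netdevice.h describes.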
4 changes: 2 additions & 2 deletions net/core/dev.c
@@ -2270,7 +2270,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                         return rc;
                 }
                 txq_trans_update(txq);
-                if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
+                if (unlikely(netif_xmit_stopped(txq) && skb->next))
                         return NETDEV_TX_BUSY;
         } while (skb->next);
 
@@ -2558,7 +2558,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
                         HARD_TX_LOCK(dev, txq, cpu);
 
-                        if (!netif_tx_queue_stopped(txq)) {
+                        if (!netif_xmit_stopped(txq)) {
                                 __this_cpu_inc(xmit_recursion);
                                 rc = dev_hard_start_xmit(skb, dev, txq);
                                 __this_cpu_dec(xmit_recursion);
4 changes: 2 additions & 2 deletions net/core/netpoll.c
@@ -76,7 +76,7 @@ static void queue_process(struct work_struct *work)
 
                 local_irq_save(flags);
                 __netif_tx_lock(txq, smp_processor_id());
-                if (netif_tx_queue_frozen_or_stopped(txq) ||
+                if (netif_xmit_frozen_or_stopped(txq) ||
                     ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
                         skb_queue_head(&npinfo->txq, skb);
                         __netif_tx_unlock(txq);
@@ -317,7 +317,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                      tries > 0; --tries) {
                         if (__netif_tx_trylock(txq)) {
-                                if (!netif_tx_queue_stopped(txq)) {
+                                if (!netif_xmit_stopped(txq)) {
                                         status = ops->ndo_start_xmit(skb, dev);
                                         if (status == NETDEV_TX_OK)
                                                 txq_trans_update(txq);
2 changes: 1 addition & 1 deletion net/core/pktgen.c
@@ -3342,7 +3342,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
         __netif_tx_lock_bh(txq);
 
-        if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
+        if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
                 ret = NETDEV_TX_BUSY;
                 pkt_dev->last_ok = 0;
                 goto unlock;
8 changes: 4 additions & 4 deletions net/sched/sch_generic.c
@@ -60,7 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
                 /* check the reason of requeuing without tx lock first */
                 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-                if (!netif_tx_queue_frozen_or_stopped(txq)) {
+                if (!netif_xmit_frozen_or_stopped(txq)) {
                         q->gso_skb = NULL;
                         q->q.qlen--;
                 } else
@@ -121,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
         spin_unlock(root_lock);
 
         HARD_TX_LOCK(dev, txq, smp_processor_id());
-        if (!netif_tx_queue_frozen_or_stopped(txq))
+        if (!netif_xmit_frozen_or_stopped(txq))
                 ret = dev_hard_start_xmit(skb, dev, txq);
 
         HARD_TX_UNLOCK(dev, txq);
@@ -143,7 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                 ret = dev_requeue_skb(skb, q);
         }
 
-        if (ret && netif_tx_queue_frozen_or_stopped(txq))
+        if (ret && netif_xmit_frozen_or_stopped(txq))
                 ret = 0;
 
         return ret;
@@ -242,7 +242,7 @@ static void dev_watchdog(unsigned long arg)
                                  * old device drivers set dev->trans_start
                                  */
                                 trans_start = txq->trans_start ? : dev->trans_start;
-                                if (netif_tx_queue_stopped(txq) &&
+                                if (netif_xmit_stopped(txq) &&
                                     time_after(jiffies, (trans_start +
                                                          dev->watchdog_timeo))) {
                                         some_queue_timedout = 1;
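The ret = 0 hunk above is subtle: sch_direct_xmit() must report "nothing more to do" when the requeue was caused by a frozen or stopped queue, because its caller keeps dequeuing for as long as the return value is nonzero. A simplified sketch of the era's __qdisc_run() loop (reconstructed from memory, not the literal source) shows why:

void __qdisc_run(struct Qdisc *q)
{
        int quota = weight_p;

        /* qdisc_restart() bottoms out in sch_direct_xmit(); a 0 return --
         * including the frozen-or-stopped case above -- breaks the loop
         * instead of spinning on a queue that cannot transmit. */
        while (qdisc_restart(q)) {
                if (--quota <= 0 || need_resched()) {
                        __netif_schedule(q);
                        break;
                }
        }

        qdisc_run_end(q);
}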
6 changes: 4 additions & 2 deletions net/sched/sch_multiq.c
@@ -107,7 +107,8 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
                 /* Check that target subqueue is available before
                  * pulling an skb to avoid head-of-line blocking.
                  */
-                if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
+                if (!netif_xmit_stopped(
+                    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
                         qdisc = q->queues[q->curband];
                         skb = qdisc->dequeue(qdisc);
                         if (skb) {
@@ -138,7 +139,8 @@ static struct sk_buff *multiq_peek(struct Qdisc *sch)
                 /* Check that target subqueue is available before
                  * pulling an skb to avoid head-of-line blocking.
                  */
-                if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
+                if (!netif_xmit_stopped(
+                    netdev_get_tx_queue(qdisc_dev(sch), curband))) {
                         qdisc = q->queues[curband];
                         skb = qdisc->ops->peek(qdisc);
                         if (skb)
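Both sch_multiq hunks hinge on the head-of-line comment: pulling from a band whose TX subqueue is XOFF would leave packets from healthy bands stuck behind it. A runnable toy model of the skip logic (every name here is invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define BANDS 3

/* Round-robin band selection that skips stopped subqueues, mirroring
 * the netif_xmit_stopped() check in multiq_dequeue(). */
static int pick_band(const bool stopped[], const int qlen[], int *curband)
{
        for (int i = 0; i < BANDS; i++) {
                *curband = (*curband + 1) % BANDS;
                if (!stopped[*curband] && qlen[*curband] > 0)
                        return *curband;
        }
        return -1;  /* all bands stopped or empty */
}

int main(void)
{
        bool stopped[BANDS] = { false, true, false };  /* band 1 is XOFF */
        int qlen[BANDS] = { 2, 5, 1 };
        int curband = BANDS - 1;

        /* Band 1 is skipped, so bands 0 and 2 keep flowing. */
        printf("dequeue from band %d\n", pick_band(stopped, qlen, &curband));
        return 0;
}

Band 1 is stopped, so the round-robin pointer moves past it and the other bands keep flowing; that is the blocking the check avoids.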
6 changes: 3 additions & 3 deletions net/sched/sch_teql.c
@@ -301,7 +301,7 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 
                 if (slave_txq->qdisc_sleeping != q)
                         continue;
-                if (__netif_subqueue_stopped(slave, subq) ||
+                if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
                     !netif_running(slave)) {
                         busy = 1;
                         continue;
@@ -312,7 +312,7 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
                 if (__netif_tx_trylock(slave_txq)) {
                         unsigned int length = qdisc_pkt_len(skb);
 
-                        if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
+                        if (!netif_xmit_frozen_or_stopped(slave_txq) &&
                             slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
                                 txq_trans_update(slave_txq);
                                 __netif_tx_unlock(slave_txq);
@@ -324,7 +324,7 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
                         }
                         __netif_tx_unlock(slave_txq);
                 }
-                if (netif_queue_stopped(dev))
+                if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)))
                         busy = 1;
                 break;
         case 1:
