From 0b09503c0953ce64b129acd62eeb5833ad056b3c Mon Sep 17 00:00:00 2001
From: Stephen Hemminger
Date: Thu, 26 Oct 2006 15:46:53 -0700
Subject: [PATCH]

--- yaml ---
r: 41658
b: refs/heads/master
c: 6c43ff18f91e54aa7555d8ae4f26eab7da5bce68
h: refs/heads/master
v: v3
---
 [refs]                   |  2 +-
 trunk/net/core/netpoll.c | 21 +++++++++++++++++++--
 2 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/[refs] b/[refs]
index d33e33045ef2..07eb691710cf 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: b41848b61bae30e3661efd4ec62ea380cedef687
+refs/heads/master: 6c43ff18f91e54aa7555d8ae4f26eab7da5bce68
diff --git a/trunk/net/core/netpoll.c b/trunk/net/core/netpoll.c
index 621baa5da49f..93cb828f3aaf 100644
--- a/trunk/net/core/netpoll.c
+++ b/trunk/net/core/netpoll.c
@@ -55,9 +55,25 @@ static void queue_process(void *p)
 	struct netpoll_info *npinfo = p;
 	struct sk_buff *skb;
 
-	while ((skb = skb_dequeue(&npinfo->txq)))
-		dev_queue_xmit(skb);
+	while ((skb = skb_dequeue(&npinfo->txq))) {
+		struct net_device *dev = skb->dev;
+		if (!netif_device_present(dev) || !netif_running(dev)) {
+			__kfree_skb(skb);
+			continue;
+		}
+
+		netif_tx_lock_bh(dev);
+		if (netif_queue_stopped(dev) ||
+		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+			skb_queue_head(&npinfo->txq, skb);
+			netif_tx_unlock_bh(dev);
+
+			schedule_delayed_work(&npinfo->tx_work, HZ/10);
+			return;
+		}
+		netif_tx_unlock_bh(dev);
+	}
 }
 
 void netpoll_queue(struct sk_buff *skb)
@@ -765,6 +781,7 @@ void netpoll_cleanup(struct netpoll *np)
 		if (atomic_dec_and_test(&npinfo->refcnt)) {
 			skb_queue_purge(&npinfo->arp_tx);
 			skb_queue_purge(&npinfo->txq);
+			cancel_rearming_delayed_work(&npinfo->tx_work);
 			flush_scheduled_work();
 
 			kfree(npinfo)