From f7bc0f0c7094b6f0b9473ca4f743b2206f99711e Mon Sep 17 00:00:00 2001
From: James Ketrenos
Date: Thu, 28 Jul 2005 16:25:55 -0500
Subject: [PATCH] --- yaml ---

r: 13560
b: refs/heads/master
c: 227d2dc1f109e3348564320cf42fc56770428ed3
h: refs/heads/master
v: v3
---
 [refs]                               |  2 +-
 trunk/drivers/net/wireless/ipw2200.c | 40 +++++++++++++++++++++-------
 2 files changed, 32 insertions(+), 10 deletions(-)

diff --git a/[refs] b/[refs]
index 3d16c6732043..c431e7fc6815 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d2021cb4e28c512c395a439d65556c260b94e47e
+refs/heads/master: 227d2dc1f109e3348564320cf42fc56770428ed3
diff --git a/trunk/drivers/net/wireless/ipw2200.c b/trunk/drivers/net/wireless/ipw2200.c
index 0923038ca788..3b3a4a077c0e 100644
--- a/trunk/drivers/net/wireless/ipw2200.c
+++ b/trunk/drivers/net/wireless/ipw2200.c
@@ -9654,8 +9654,8 @@ modify to send one tfd per fragment instead of using chunking. otherwise
 we need to heavily modify the ieee80211_skb_to_txb.
 */
 
-static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
-			      int pri)
+static inline int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
+			     int pri)
 {
 	struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
 	    txb->fragments[0]->data;
@@ -9672,6 +9672,11 @@ static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
 	u16 remaining_bytes;
 	int fc;
 
+	/* If there isn't room in the queue, we return busy and let the
+	 * network stack requeue the packet for us */
+	if (ipw_queue_space(q) < q->high_mark)
+		return NETDEV_TX_BUSY;
+
 	switch (priv->ieee->iw_mode) {
 	case IW_MODE_ADHOC:
 		hdr_len = IEEE80211_3ADDR_LEN;
@@ -9837,14 +9842,28 @@ static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
 	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
 	ipw_write32(priv, q->reg_w, q->first_empty);
 
-	if (ipw_queue_space(q) < q->high_mark)
-		netif_stop_queue(priv->net_dev);
-
-	return;
+	return NETDEV_TX_OK;
 
       drop:
 	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
 	ieee80211_txb_free(txb);
+	return NETDEV_TX_OK;
+}
+
+static int ipw_net_is_queue_full(struct net_device *dev, int pri)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+#ifdef CONFIG_IPW_QOS
+	int tx_id = ipw_get_tx_queue_number(priv, pri);
+	struct clx2_tx_queue *txq = &priv->txq[tx_id];
+#else
+	struct clx2_tx_queue *txq = &priv->txq[0];
+#endif				/* CONFIG_IPW_QOS */
+
+	if (ipw_queue_space(&txq->q) < txq->q.high_mark)
+		return 1;
+
+	return 0;
 }
 
 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
@@ -9852,6 +9871,7 @@ static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
 {
 	struct ipw_priv *priv = ieee80211_priv(dev);
 	unsigned long flags;
+	int ret;
 
 	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
 	spin_lock_irqsave(&priv->lock, flags);
@@ -9863,11 +9883,12 @@ static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
 		goto fail_unlock;
 	}
 
-	ipw_tx_skb(priv, txb, pri);
-	__ipw_led_activity_on(priv);
+	ret = ipw_tx_skb(priv, txb, pri);
+	if (ret == NETDEV_TX_OK)
+		__ipw_led_activity_on(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	return 0;
+	return ret;
 
       fail_unlock:
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -10706,6 +10727,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
 	priv->ieee->set_security = shim__set_security;
+	priv->ieee->is_queue_full = ipw_net_is_queue_full;
 #ifdef CONFIG_IPW_QOS
 	priv->ieee->handle_management_frame = ipw_handle_management_frame;
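
Below is a stand-alone sketch (not part of the patch) of the flow-control idea the patch introduces: the driver reports NETDEV_TX_BUSY once fewer than high_mark descriptors remain free in the TX ring, so the network stack requeues the packet instead of the driver silently dropping it, and the new is_queue_full hook presumably lets the ieee80211 layer make the same check before building a txb. The demo_* names and the space calculation are assumptions modelled on a generic circular ring, not copies of ipw_queue_space().

/*
 * Illustrative only -- not from ipw2200.c.  Models the high-water-mark
 * check added by the patch: once fewer than 'high_mark' descriptors are
 * free in the TX ring, transmissions are refused (NETDEV_TX_BUSY).
 */
#include <stdio.h>

struct demo_queue {
	int n_bd;		/* number of buffer descriptors in the ring */
	int first_empty;	/* producer index (next slot to fill)       */
	int last_used;		/* consumer index (next slot to reclaim)    */
	int high_mark;		/* refuse new packets below this free count */
};

/* Free slots between producer and consumer, keeping a small reserve so a
 * completely full ring is never mistaken for an empty one.  This mirrors
 * what a queue-space helper for a circular ring typically does; it is an
 * assumption, not a copy of ipw_queue_space(). */
static int demo_queue_space(const struct demo_queue *q)
{
	int s = q->last_used - q->first_empty;

	if (s <= 0)
		s += q->n_bd;
	s -= 2;
	return s < 0 ? 0 : s;
}

/* The decision ipw_tx_skb() and ipw_net_is_queue_full() now both make. */
static const char *demo_tx_decision(const struct demo_queue *q)
{
	return demo_queue_space(q) < q->high_mark ? "NETDEV_TX_BUSY"
						  : "NETDEV_TX_OK";
}

int main(void)
{
	struct demo_queue q = { .n_bd = 64, .last_used = 0, .high_mark = 4 };
	int used;

	/* Pretend 'used' descriptors are in flight and not yet reclaimed. */
	for (used = 0; used <= 62; used += 31) {
		q.first_empty = used % q.n_bd;
		printf("%2d in flight: space=%2d -> %s\n", used,
		       demo_queue_space(&q), demo_tx_decision(&q));
	}
	return 0;
}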