Commit 94b1687
---
r: 265908
b: refs/heads/master
c: 44f1d26
h: refs/heads/master
v: v3
Felix Fietkau authored and John W. Linville committed Aug 30, 2011
1 parent 84db8ca commit 94b1687
Showing 2 changed files with 67 additions and 40 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: fa05f87ad4213a3e99bea6f5e73611dc27b4304a
+refs/heads/master: 44f1d26c16d5806f23f8d7ce5b85362a1897fbef
105 changes: 66 additions & 39 deletions trunk/drivers/net/wireless/ath/ath9k/xmit.c
@@ -48,8 +48,9 @@ static u16 bits_per_symbol[][2] = {
 #define IS_HT_RATE(_rate)     ((_rate) & 0x80)

 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
-			       struct ath_atx_tid *tid,
-			       struct list_head *bf_head);
+			       struct ath_atx_tid *tid, struct sk_buff *skb);
+static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
+			    int tx_flags, struct ath_txq *txq);
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
@@ -61,6 +62,10 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
			     int txok, bool update_rc);
 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno);
+static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+					   struct ath_txq *txq,
+					   struct ath_atx_tid *tid,
+					   struct sk_buff *skb);

 enum {
	MCS_HT20,
@@ -164,14 +169,13 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
		fi = get_frame_info(skb);
		bf = fi->bf;

-		list_add_tail(&bf->list, &bf_head);
-
		spin_unlock_bh(&txq->axq_lock);
-		if (fi->retries) {
+		if (bf && fi->retries) {
+			list_add_tail(&bf->list, &bf_head);
			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
		} else {
-			ath_tx_send_normal(sc, txq, NULL, &bf_head);
+			ath_tx_send_normal(sc, txq, NULL, skb);
		}
		spin_lock_bh(&txq->axq_lock);
	}
@@ -234,6 +238,13 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
		fi = get_frame_info(skb);
		bf = fi->bf;

+		if (!bf) {
+			spin_unlock(&txq->axq_lock);
+			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
+			spin_lock(&txq->axq_lock);
+			continue;
+		}
+
		list_add_tail(&bf->list, &bf_head);

		if (fi->retries)
@@ -760,8 +771,14 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
		skb = skb_peek(&tid->buf_q);
		fi = get_frame_info(skb);
		bf = fi->bf;
-		seqno = bf->bf_state.seqno;
+		if (!fi->bf)
+			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+
+		if (!bf)
+			continue;

+		bf->bf_state.bf_type |= BUF_AMPDU;
+		seqno = bf->bf_state.seqno;
		if (!bf_first)
			bf_first = bf;

@@ -1434,13 +1451,11 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
 }

 static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
-			      struct ath_buf *bf, struct ath_tx_control *txctl)
+			      struct sk_buff *skb, struct ath_tx_control *txctl)
 {
-	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+	struct ath_frame_info *fi = get_frame_info(skb);
	struct list_head bf_head;
-	u16 seqno = bf->bf_state.seqno;
-
-	bf->bf_state.bf_type |= BUF_AMPDU;
+	struct ath_buf *bf;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
@@ -1450,25 +1465,29 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
	 * - h/w queue depth exceeds low water mark
	 */
	if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
-	    !BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
+	    !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
-		__skb_queue_tail(&tid->buf_q, bf->bf_mpdu);
+		__skb_queue_tail(&tid->buf_q, skb);
		if (!txctl->an || !txctl->an->sleeping)
			ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

+	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+	if (!bf)
+		return;
+
+	bf->bf_state.bf_type |= BUF_AMPDU;
	INIT_LIST_HEAD(&bf_head);
	list_add(&bf->list, &bf_head);

	/* Add sub-frame to BAW */
	if (!fi->retries)
-		ath_tx_addto_baw(sc, tid, seqno);
+		ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);

	/* Queue to h/w without aggregation */
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
@@ -1478,23 +1497,30 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
 }

 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
-			       struct ath_atx_tid *tid,
-			       struct list_head *bf_head)
+			       struct ath_atx_tid *tid, struct sk_buff *skb)
 {
-	struct ath_frame_info *fi;
+	struct ath_frame_info *fi = get_frame_info(skb);
+	struct list_head bf_head;
	struct ath_buf *bf;

-	bf = list_first_entry(bf_head, struct ath_buf, list);
+	bf = fi->bf;
+	if (!bf)
+		bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+
+	if (!bf)
+		return;
+
+	INIT_LIST_HEAD(&bf_head);
+	list_add_tail(&bf->list, &bf_head);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_lastbf = bf;
-	fi = get_frame_info(bf->bf_mpdu);
	ath_buf_set_rate(sc, bf, fi->framelen);
-	ath_tx_txqaddbuf(sc, txq, bf_head, false);
+	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
	TX_STAT_INC(txq->axq_qnum, queued);
 }

@@ -1717,6 +1743,10 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)

 }

+/*
+ * Assign a descriptor (and sequence number if necessary,
+ * and map buffer for DMA. Frees skb on error
+ */
 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
					   struct ath_txq *txq,
					   struct ath_atx_tid *tid,
@@ -1734,7 +1764,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
-		return NULL;
+		goto error;
	}

	ATH_TXBUF_RESET(bf);
@@ -1757,7 +1787,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
		ath_err(ath9k_hw_common(sc->sc_ah),
			"dma_mapping_error() on TX\n");
		ath_tx_return_buffer(sc, bf);
-		return NULL;
+		goto error;
	}

	frm_type = get_hw_packet_type(skb);
@@ -1779,18 +1809,20 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
	fi->bf = bf;

	return bf;
+
+error:
+	dev_kfree_skb_any(skb);
+	return NULL;
 }

 /* FIXME: tx power */
-static int ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
+static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
			     struct ath_tx_control *txctl)
 {
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct list_head bf_head;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf;
-	int ret = 0;
	u8 tidno;

	spin_lock_bh(&txctl->txq->axq_lock);
@@ -1803,21 +1835,16 @@ static int ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
		WARN_ON(tid->ac->txq != txctl->txq);
	}

-	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
-	if (unlikely(!bf)) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
		/*
		 * Try aggregation if it's a unicast data frame
		 * and the destination is HT capable.
		 */
-		ath_tx_send_ampdu(sc, tid, bf, txctl);
+		ath_tx_send_ampdu(sc, tid, skb, txctl);
	} else {
-		INIT_LIST_HEAD(&bf_head);
-		list_add_tail(&bf->list, &bf_head);
+		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+		if (!bf)
+			goto out;

		bf->bf_state.bfs_paprd = txctl->paprd;

@@ -1831,12 +1858,11 @@ static int ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
		if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);

-		ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
+		ath_tx_send_normal(sc, txctl->txq, tid, skb);
	}

 out:
	spin_unlock_bh(&txctl->txq->axq_lock);
-	return ret;
 }

 /* Upon failure caller should free skb */
@@ -1904,7 +1930,8 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
	}
	spin_unlock_bh(&txq->axq_lock);

-	return ath_tx_start_dma(sc, skb, txctl);
+	ath_tx_start_dma(sc, skb, txctl);
+	return 0;
 }

 /*****************/
