Merge tag 'mt76-for-kvalo-2020-09-23' of https://github.com/nbd168/wireless

mt76 patches for 5.10

* mt7663 runtime power management improvements
* performance improvements
* sdio support fixes
* testmode fixes
* mt7622 fixes
* mt7915 A-MSDU offload
* cleanups

# gpg: Signature made Thu 24 Sep 2020 07:10:25 PM EEST using DSA key ID 02A76EF5
# gpg: Good signature from "Felix Fietkau <nbd@nbd.name>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 75D1 1A7D 91A7 710F 4900  42EF D77D 141D 02A7 6EF5
Kalle Valo committed Sep 24, 2020
2 parents 5acbf34 + b9b852b commit 225060c
Showing 68 changed files with 1,758 additions and 1,333 deletions.
9 changes: 4 additions & 5 deletions drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -31,15 +31,14 @@ int mt76_queues_read(struct seq_file *s, void *data)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
-		struct mt76_sw_queue *q = &dev->q_tx[i];
+		struct mt76_queue *q = dev->q_tx[i];
 
-		if (!q->q)
+		if (!q)
 			continue;
 
 		seq_printf(s,
-			   "%d: queued=%d head=%d tail=%d swq_queued=%d\n",
-			   i, q->q->queued, q->q->head, q->q->tail,
-			   q->swq_queued);
+			   "%d: queued=%d head=%d tail=%d\n",
+			   i, q->queued, q->head, q->tail);
 	}
 
 	return 0;
162 changes: 106 additions & 56 deletions drivers/net/wireless/mediatek/mt76/dma.c
@@ -7,6 +7,76 @@
 #include "mt76.h"
 #include "dma.h"
 
+static struct mt76_txwi_cache *
+mt76_alloc_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t;
+	dma_addr_t addr;
+	u8 *txwi;
+	int size;
+
+	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
+	txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+	if (!txwi)
+		return NULL;
+
+	addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
+			      DMA_TO_DEVICE);
+	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
+	t->dma_addr = addr;
+
+	return t;
+}
+
+static struct mt76_txwi_cache *
+__mt76_get_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t = NULL;
+
+	spin_lock(&dev->lock);
+	if (!list_empty(&dev->txwi_cache)) {
+		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
+				     list);
+		list_del(&t->list);
+	}
+	spin_unlock(&dev->lock);
+
+	return t;
+}
+
+static struct mt76_txwi_cache *
+mt76_get_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
+
+	if (t)
+		return t;
+
+	return mt76_alloc_txwi(dev);
+}
+
+void
+mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+	if (!t)
+		return;
+
+	spin_lock(&dev->lock);
+	list_add(&t->list, &dev->txwi_cache);
+	spin_unlock(&dev->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_put_txwi);
+
+static void
+mt76_free_pending_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t;
+
+	while ((t = __mt76_get_txwi(dev)) != NULL)
+		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
+				 DMA_TO_DEVICE);
+}
+
 static int
 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 		     int idx, int n_desc, int bufsize,
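
For context on the cache helpers moved into the hunk above: a caller takes a TXWI buffer with mt76_get_txwi() (reusing a cached entry or allocating a fresh, DMA-mapped one) and returns it with mt76_put_txwi() once the frame completes, so the DMA mapping stays alive for reuse; mt76_free_pending_txwi() unmaps whatever is left at teardown. A minimal illustrative sketch of that get/put pattern — the helper below is hypothetical, not part of this diff, and would have to live in dma.c since mt76_get_txwi() is static:

static int example_use_txwi_cache(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	/* Grab a TXWI from the free list, or allocate and map a new one. */
	t = mt76_get_txwi(dev);
	if (!t)
		return -ENOMEM;

	/* ... fill the TXWI and queue the frame using t->dma_addr ... */

	/* On completion (or error), hand it back to the cache; the DMA
	 * mapping is intentionally kept so the buffer can be reused.
	 */
	mt76_put_txwi(dev, t);

	return 0;
}
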
@@ -49,6 +119,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 		 struct mt76_queue_buf *buf, int nbufs, u32 info,
 		 struct sk_buff *skb, void *txwi)
 {
+	struct mt76_queue_entry *entry;
 	struct mt76_desc *desc;
 	u32 ctrl;
 	int i, idx = -1;
@@ -61,22 +132,34 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 	for (i = 0; i < nbufs; i += 2, buf += 2) {
 		u32 buf0 = buf[0].addr, buf1 = 0;
 
+		idx = q->head;
+		q->head = (q->head + 1) % q->ndesc;
+
+		desc = &q->desc[idx];
+		entry = &q->entry[idx];
+
+		if (buf[0].skip_unmap)
+			entry->skip_buf0 = true;
+		entry->skip_buf1 = i == nbufs - 1;
+
+		entry->dma_addr[0] = buf[0].addr;
+		entry->dma_len[0] = buf[0].len;
+
 		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
 		if (i < nbufs - 1) {
+			entry->dma_addr[1] = buf[1].addr;
+			entry->dma_len[1] = buf[1].len;
 			buf1 = buf[1].addr;
 			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+			if (buf[1].skip_unmap)
+				entry->skip_buf1 = true;
 		}
 
 		if (i == nbufs - 1)
 			ctrl |= MT_DMA_CTL_LAST_SEC0;
 		else if (i == nbufs - 2)
 			ctrl |= MT_DMA_CTL_LAST_SEC1;
 
-		idx = q->head;
-		q->head = (q->head + 1) % q->ndesc;
-
-		desc = &q->desc[idx];
-
 		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
 		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
 		WRITE_ONCE(desc->info, cpu_to_le32(info));
@@ -96,24 +179,14 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 			struct mt76_queue_entry *prev_e)
 {
 	struct mt76_queue_entry *e = &q->entry[idx];
-	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
-	u32 ctrl = le32_to_cpu(__ctrl);
 
-	if (!e->skip_buf0) {
-		__le32 addr = READ_ONCE(q->desc[idx].buf0);
-		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
-
-		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+	if (!e->skip_buf0)
+		dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
 				 DMA_TO_DEVICE);
-	}
 
-	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
-		__le32 addr = READ_ONCE(q->desc[idx].buf1);
-		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
-
-		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+	if (!e->skip_buf1)
+		dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
 				 DMA_TO_DEVICE);
-	}
 
 	if (e->txwi == DMA_DUMMY_DATA)
 		e->txwi = NULL;
@@ -137,19 +210,17 @@ mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
 static void
 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	wmb();
 	writel(q->head, &q->regs->cpu_idx);
 }
 
 static void
 mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
 {
-	struct mt76_sw_queue *sq = &dev->q_tx[qid];
-	struct mt76_queue *q = sq->q;
+	struct mt76_queue *q = dev->q_tx[qid];
 	struct mt76_queue_entry entry;
-	unsigned int n_swq_queued[8] = {};
-	unsigned int n_queued = 0;
 	bool wake = false;
-	int i, last;
+	int last;
 
 	if (!q)
 		return;
@@ -159,16 +230,9 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
 	else
 		last = readl(&q->regs->dma_idx);
 
-	while ((q->queued > n_queued) && q->tail != last) {
+	while (q->queued > 0 && q->tail != last) {
 		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
-		if (entry.schedule)
-			n_swq_queued[entry.qid]++;
-
-		q->tail = (q->tail + 1) % q->ndesc;
-		n_queued++;
-
-		if (entry.skb)
-			dev->drv->tx_complete_skb(dev, qid, &entry);
+		mt76_queue_tx_complete(dev, q, &entry);
 
 		if (entry.txwi) {
 			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
@@ -178,29 +242,14 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
 
 		if (!flush && q->tail == last)
 			last = readl(&q->regs->dma_idx);
 	}
 
-	spin_lock_bh(&q->lock);
-
-	q->queued -= n_queued;
-	for (i = 0; i < 4; i++) {
-		if (!n_swq_queued[i])
-			continue;
-
-		dev->q_tx[i].swq_queued -= n_swq_queued[i];
-	}
-
-	/* ext PHY */
-	for (i = 0; i < 4; i++) {
-		if (!n_swq_queued[i])
-			continue;
-
-		dev->q_tx[__MT_TXQ_MAX + i].swq_queued -= n_swq_queued[4 + i];
-	}
-
 	if (flush) {
+		spin_lock_bh(&q->lock);
 		mt76_dma_sync_idx(dev, q);
 		mt76_dma_kick_queue(dev, q);
+		spin_unlock_bh(&q->lock);
 	}
 
 	wake = wake && q->stopped &&
@@ -211,8 +260,6 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
 	if (!q->queued)
 		wake_up(&dev->tx_wait);
 
-	spin_unlock_bh(&q->lock);
-
 	if (wake)
 		ieee80211_wake_queue(dev->hw, qid);
 }
@@ -227,7 +274,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	void *buf = e->buf;
 	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
 
-	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
+	buf_addr = e->dma_addr[0];
 	if (len) {
 		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
 		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
@@ -268,7 +315,7 @@ static int
 mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
 			  struct sk_buff *skb, u32 tx_info)
 {
-	struct mt76_queue *q = dev->q_tx[qid].q;
+	struct mt76_queue *q = dev->q_tx[qid];
 	struct mt76_queue_buf buf;
 	dma_addr_t addr;
 
@@ -300,7 +347,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
 		      struct sk_buff *skb, struct mt76_wcid *wcid,
 		      struct ieee80211_sta *sta)
 {
-	struct mt76_queue *q = dev->q_tx[qid].q;
+	struct mt76_queue *q = dev->q_tx[qid];
 	struct mt76_tx_info tx_info = {
 		.skb = skb,
 	};
@@ -378,7 +425,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
 
 	e.skb = tx_info.skb;
 	e.txwi = t;
-	dev->drv->tx_complete_skb(dev, qid, &e);
+	dev->drv->tx_complete_skb(dev, &e);
 	mt76_put_txwi(dev, t);
 	return ret;
 }
@@ -612,6 +659,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 {
 	int i;
 
+	mt76_worker_disable(&dev->tx_worker);
 	netif_napi_del(&dev->tx_napi);
 	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
 		mt76_dma_tx_cleanup(dev, i, true);
@@ -620,5 +668,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 		netif_napi_del(&dev->napi[i]);
 		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
 	}
+
+	mt76_free_pending_txwi(dev);
 }
 EXPORT_SYMBOL_GPL(mt76_dma_cleanup);