From f9ec5723c3dbfcede9c7b0dcdf85e401ce16316c Mon Sep 17 00:00:00 2001 From: Christian Marangi Date: Sat, 23 Jul 2022 16:29:29 +0200 Subject: [PATCH 1/5] net: ethernet: stmicro: stmmac: move queue reset to dedicated functions Move queue reset to dedicated functions. This aside from a simple cleanup is also required to allocate a dma conf without resetting the tx queue while the device is temporarily detached as now the reset is not part of the dma init function and can be done later in the code flow. Signed-off-by: Christian Marangi Signed-off-by: Jakub Kicinski --- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 59 ++++++++++--------- 1 file changed, 31 insertions(+), 28 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 89eb3e51b22cc..5d08d09a9d516 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -130,6 +130,9 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id); static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id); static irqreturn_t stmmac_msi_intr_tx(int irq, void *data); static irqreturn_t stmmac_msi_intr_rx(int irq, void *data); +static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue); +static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue); +static void stmmac_reset_queues_param(struct stmmac_priv *priv); static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, @@ -1639,9 +1642,6 @@ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t f return -ENOMEM; } - rx_q->cur_rx = 0; - rx_q->dirty_rx = 0; - /* Setup the chained descriptor addresses */ if (priv->mode == STMMAC_CHAIN_MODE) { if (priv->extend_desc) @@ -1744,12 +1744,6 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) tx_q->tx_skbuff[i] = NULL; } - tx_q->dirty_tx = 0; - tx_q->cur_tx = 0; - tx_q->mss = 0; - - netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); - return 0; } @@ -2635,10 +2629,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) stmmac_stop_tx_dma(priv, chan); dma_free_tx_skbufs(priv, chan); stmmac_clear_tx_descriptors(priv, chan); - tx_q->dirty_tx = 0; - tx_q->cur_tx = 0; - tx_q->mss = 0; - netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); + stmmac_reset_tx_queue(priv, chan); stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, tx_q->dma_tx_phy, chan); stmmac_start_tx_dma(priv, chan); @@ -3705,6 +3696,8 @@ static int stmmac_open(struct net_device *dev) goto init_error; } + stmmac_reset_queues_param(priv); + ret = stmmac_hw_setup(dev, true); if (ret < 0) { netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); @@ -6331,6 +6324,7 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) return; } + stmmac_reset_rx_queue(priv, queue); stmmac_clear_rx_descriptors(priv, queue); stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, @@ -6392,6 +6386,7 @@ void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) return; } + stmmac_reset_tx_queue(priv, queue); stmmac_clear_tx_descriptors(priv, queue); stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, @@ -7319,6 +7314,25 @@ int stmmac_suspend(struct device *dev) } EXPORT_SYMBOL_GPL(stmmac_suspend); +static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 
queue) +{ + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + + rx_q->cur_rx = 0; + rx_q->dirty_rx = 0; +} + +static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + + tx_q->cur_tx = 0; + tx_q->dirty_tx = 0; + tx_q->mss = 0; + + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); +} + /** * stmmac_reset_queues_param - reset queue parameters * @priv: device pointer @@ -7329,22 +7343,11 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv) u32 tx_cnt = priv->plat->tx_queues_to_use; u32 queue; - for (queue = 0; queue < rx_cnt; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - - rx_q->cur_rx = 0; - rx_q->dirty_rx = 0; - } - - for (queue = 0; queue < tx_cnt; queue++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + for (queue = 0; queue < rx_cnt; queue++) + stmmac_reset_rx_queue(priv, queue); - tx_q->cur_tx = 0; - tx_q->dirty_tx = 0; - tx_q->mss = 0; - - netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); - } + for (queue = 0; queue < tx_cnt; queue++) + stmmac_reset_tx_queue(priv, queue); } /** From 7028471edb646bfc532fec0973e50e784cdcb7c6 Mon Sep 17 00:00:00 2001 From: Christian Marangi Date: Sat, 23 Jul 2022 16:29:30 +0200 Subject: [PATCH 2/5] net: ethernet: stmicro: stmmac: first disable all queues and disconnect in release Disable all queues and disconnect before tx_disable in stmmac_release to prevent a corner case where packet may be still queued at the same time tx_disable is called resulting in kernel panic if some packet still has to be processed. Signed-off-by: Christian Marangi Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 5d08d09a9d516..5bd2e4d47ac5b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3757,8 +3757,6 @@ static int stmmac_release(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); u32 chan; - netif_tx_disable(dev); - if (device_may_wakeup(priv->device)) phylink_speed_down(priv->phylink, false); /* Stop and disconnect the PHY */ @@ -3770,6 +3768,8 @@ static int stmmac_release(struct net_device *dev) for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) hrtimer_cancel(&priv->tx_queue[chan].txtimer); + netif_tx_disable(dev); + /* Free the IRQ lines */ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); From 8531c80800c10e8ef7952022326c2f983e1314bf Mon Sep 17 00:00:00 2001 From: Christian Marangi Date: Sat, 23 Jul 2022 16:29:31 +0200 Subject: [PATCH 3/5] net: ethernet: stmicro: stmmac: move dma conf to dedicated struct Move dma buf conf to dedicated struct. This in preparation for code rework that will permit to allocate separate dma_conf without affecting the priv struct. 
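As a rough illustration of the rework this prepares for (helper names below are hypothetical and are not introduced by this patch; the actual open-path changes come in the following patches), keeping all ring state behind one struct lets a caller build a complete candidate DMA configuration first and commit it to priv->dma_conf only once allocation has succeeded:

	/*
	 * Hypothetical sketch only: illustrates why the ring state is
	 * grouped into struct stmmac_dma_conf. The alloc/free helpers
	 * named here do not exist in this patch.
	 */
	static int stmmac_hypothetical_reconfig(struct stmmac_priv *priv,
						u32 new_rx_size, u32 new_tx_size)
	{
		struct stmmac_dma_conf new_conf = priv->dma_conf;
		int ret;

		new_conf.dma_rx_size = new_rx_size;
		new_conf.dma_tx_size = new_tx_size;

		/* Try to allocate rings for the candidate config first... */
		ret = hypothetical_alloc_dma_desc_resources(priv, &new_conf);
		if (ret)
			return ret;	/* rings in priv->dma_conf stay untouched */

		/* ...then release the old rings and commit the new ones. */
		hypothetical_free_dma_desc_resources(priv, &priv->dma_conf);
		priv->dma_conf = new_conf;

		return 0;
	}

With every ring parameter and queue array living in the dedicated struct, the final commit step reduces to a single assignment instead of touching fields scattered across struct stmmac_priv.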
Signed-off-by: Christian Marangi Signed-off-by: Jakub Kicinski --- .../net/ethernet/stmicro/stmmac/chain_mode.c | 6 +- .../net/ethernet/stmicro/stmmac/ring_mode.c | 4 +- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 21 +- .../ethernet/stmicro/stmmac/stmmac_ethtool.c | 4 +- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 286 +++++++++--------- .../stmicro/stmmac/stmmac_selftests.c | 8 +- .../net/ethernet/stmicro/stmmac/stmmac_tc.c | 6 +- 7 files changed, 172 insertions(+), 163 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index d2cdc02d9f944..2e8744ac6b91a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -46,7 +46,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) while (len != 0) { tx_q->tx_skbuff[entry] = NULL; - entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); desc = tx_q->dma_tx + entry; if (len > bmax) { @@ -137,7 +137,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p) */ p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy + (((rx_q->dirty_rx) + 1) % - priv->dma_rx_size) * + priv->dma_conf.dma_rx_size) * sizeof(struct dma_desc))); } @@ -155,7 +155,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p) */ p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy + ((tx_q->dirty_tx + 1) % - priv->dma_tx_size)) + priv->dma_conf.dma_tx_size)) * sizeof(struct dma_desc))); } diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 8ad900949dc8b..2b5b17d8b8a01 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -51,7 +51,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum) stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, STMMAC_RING_MODE, 0, false, skb->len); tx_q->tx_skbuff[entry] = NULL; - entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); if (priv->extend_desc) desc = (struct dma_desc *)(tx_q->dma_etx + entry); @@ -107,7 +107,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p) struct stmmac_priv *priv = rx_q->priv_data; /* Fill DES3 in case of RING mode */ - if (priv->dma_buf_sz == BUF_SIZE_16KiB) + if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB) p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index f9e83964aa7ea..bdbf86cb102af 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -188,6 +188,18 @@ struct stmmac_rfs_entry { int tc; }; +struct stmmac_dma_conf { + unsigned int dma_buf_sz; + + /* RX Queue */ + struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES]; + unsigned int dma_rx_size; + + /* TX Queue */ + struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; + unsigned int dma_tx_size; +}; + struct stmmac_priv { /* Frequently used values are kept adjacent for cache effect */ u32 tx_coal_frames[MTL_MAX_TX_QUEUES]; @@ -201,7 +213,6 @@ struct stmmac_priv { int sph_cap; u32 sarc_type; - unsigned int dma_buf_sz; unsigned int rx_copybreak; u32 rx_riwt[MTL_MAX_TX_QUEUES]; int hwts_rx_en; @@ -213,13 +224,7 @@ struct stmmac_priv { int (*hwif_quirks)(struct stmmac_priv *priv); struct mutex lock; - /* RX Queue */ - struct stmmac_rx_queue 
rx_queue[MTL_MAX_RX_QUEUES]; - unsigned int dma_rx_size; - - /* TX Queue */ - struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; - unsigned int dma_tx_size; + struct stmmac_dma_conf dma_conf; /* Generic channel for NAPI */ struct stmmac_channel channel[STMMAC_CH_MAX]; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 9c3055ee26085..d6a44d53fe087 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -485,8 +485,8 @@ static void stmmac_get_ringparam(struct net_device *netdev, ring->rx_max_pending = DMA_MAX_RX_SIZE; ring->tx_max_pending = DMA_MAX_TX_SIZE; - ring->rx_pending = priv->dma_rx_size; - ring->tx_pending = priv->dma_tx_size; + ring->rx_pending = priv->dma_conf.dma_rx_size; + ring->tx_pending = priv->dma_conf.dma_tx_size; } static int stmmac_set_ringparam(struct net_device *netdev, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 5bd2e4d47ac5b..552d4b23cdb38 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -74,8 +74,8 @@ static int phyaddr = -1; module_param(phyaddr, int, 0444); MODULE_PARM_DESC(phyaddr, "Physical device address"); -#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4) -#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4) +#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4) +#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4) /* Limit to make sure XDP TX and slow path can coexist */ #define STMMAC_XSK_TX_BUDGET_MAX 256 @@ -234,7 +234,7 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv) /* synchronize_rcu() needed for pending XDP buffers to drain */ for (queue = 0; queue < rx_queues_cnt; queue++) { - rx_q = &priv->rx_queue[queue]; + rx_q = &priv->dma_conf.rx_queue[queue]; if (rx_q->xsk_pool) { synchronize_rcu(); break; @@ -360,13 +360,13 @@ static void print_pkt(unsigned char *buf, int len) static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; u32 avail; if (tx_q->dirty_tx > tx_q->cur_tx) avail = tx_q->dirty_tx - tx_q->cur_tx - 1; else - avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; + avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; return avail; } @@ -378,13 +378,13 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) */ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; u32 dirty; if (rx_q->dirty_rx <= rx_q->cur_rx) dirty = rx_q->cur_rx - rx_q->dirty_rx; else - dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; + dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; return dirty; } @@ -412,7 +412,7 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv) /* check if all TX queues have the work finished */ for (queue = 0; queue < tx_cnt; queue++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; if (tx_q->dirty_tx != tx_q->cur_tx) return -EBUSY; /* still unfinished work */ @@ -1232,7 +1232,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv) /* Display RX rings */ for (queue = 0; queue < 
rx_cnt; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; pr_info("\tRX Queue %u rings\n", queue); @@ -1245,7 +1245,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv) } /* Display RX ring */ - stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true, + stmmac_display_ring(priv, head_rx, priv->dma_conf.dma_rx_size, true, rx_q->dma_rx_phy, desc_size); } } @@ -1259,7 +1259,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv) /* Display TX rings */ for (queue = 0; queue < tx_cnt; queue++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; pr_info("\tTX Queue %d rings\n", queue); @@ -1274,7 +1274,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv) desc_size = sizeof(struct dma_desc); } - stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false, + stmmac_display_ring(priv, head_tx, priv->dma_conf.dma_tx_size, false, tx_q->dma_tx_phy, desc_size); } } @@ -1315,21 +1315,21 @@ static int stmmac_set_bfsize(int mtu, int bufsize) */ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; int i; /* Clear the RX descriptors */ - for (i = 0; i < priv->dma_rx_size; i++) + for (i = 0; i < priv->dma_conf.dma_rx_size; i++) if (priv->extend_desc) stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, priv->use_riwt, priv->mode, - (i == priv->dma_rx_size - 1), - priv->dma_buf_sz); + (i == priv->dma_conf.dma_rx_size - 1), + priv->dma_conf.dma_buf_sz); else stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], priv->use_riwt, priv->mode, - (i == priv->dma_rx_size - 1), - priv->dma_buf_sz); + (i == priv->dma_conf.dma_rx_size - 1), + priv->dma_conf.dma_buf_sz); } /** @@ -1341,12 +1341,12 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) */ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; int i; /* Clear the TX descriptors */ - for (i = 0; i < priv->dma_tx_size; i++) { - int last = (i == (priv->dma_tx_size - 1)); + for (i = 0; i < priv->dma_conf.dma_tx_size; i++) { + int last = (i == (priv->dma_conf.dma_tx_size - 1)); struct dma_desc *p; if (priv->extend_desc) @@ -1394,7 +1394,7 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv) static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, int i, gfp_t flags, u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); @@ -1423,7 +1423,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; stmmac_set_desc_addr(priv, p, buf->addr); - if (priv->dma_buf_sz == BUF_SIZE_16KiB) + if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB) stmmac_init_desc3(priv, p); return 0; @@ -1437,7 +1437,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, */ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; if (buf->page) @@ -1457,7 +1457,7 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i) */ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; if (tx_q->tx_skbuff_dma[i].buf && tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { @@ -1502,17 +1502,17 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) { int i; - for (i = 0; i < priv->dma_rx_size; i++) + for (i = 0; i < priv->dma_conf.dma_rx_size; i++) stmmac_free_rx_buffer(priv, queue, i); } static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue, gfp_t flags) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; int i; - for (i = 0; i < priv->dma_rx_size; i++) { + for (i = 0; i < priv->dma_conf.dma_rx_size; i++) { struct dma_desc *p; int ret; @@ -1539,10 +1539,10 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue, */ static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; int i; - for (i = 0; i < priv->dma_rx_size; i++) { + for (i = 0; i < priv->dma_conf.dma_rx_size; i++) { struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; if (!buf->xdp) @@ -1555,10 +1555,10 @@ static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue) static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; int i; - for (i = 0; i < priv->dma_rx_size; i++) { + for (i = 0; i < priv->dma_conf.dma_rx_size; i++) { struct stmmac_rx_buffer *buf; dma_addr_t dma_addr; struct dma_desc *p; @@ -1601,7 +1601,7 @@ static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 q */ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; int ret; netif_dbg(priv, probe, priv->dev, @@ -1647,11 +1647,11 @@ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t f if (priv->extend_desc) stmmac_mode_init(priv, rx_q->dma_erx, rx_q->dma_rx_phy, - priv->dma_rx_size, 1); + priv->dma_conf.dma_rx_size, 1); else stmmac_mode_init(priv, rx_q->dma_rx, rx_q->dma_rx_phy, - priv->dma_rx_size, 0); + priv->dma_conf.dma_rx_size, 0); } return 0; @@ -1678,7 +1678,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) err_init_rx_buffers: while (queue >= 0) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; if (rx_q->xsk_pool) dma_free_rx_xskbufs(priv, queue); @@ -1704,7 +1704,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) */ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; int i; netif_dbg(priv, probe, priv->dev, @@ -1716,16 +1716,16 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) if (priv->extend_desc) stmmac_mode_init(priv, tx_q->dma_etx, tx_q->dma_tx_phy, - priv->dma_tx_size, 
1); + priv->dma_conf.dma_tx_size, 1); else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) stmmac_mode_init(priv, tx_q->dma_tx, tx_q->dma_tx_phy, - priv->dma_tx_size, 0); + priv->dma_conf.dma_tx_size, 0); } tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); - for (i = 0; i < priv->dma_tx_size; i++) { + for (i = 0; i < priv->dma_conf.dma_tx_size; i++) { struct dma_desc *p; if (priv->extend_desc) @@ -1795,12 +1795,12 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) */ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; int i; tx_q->xsk_frames_done = 0; - for (i = 0; i < priv->dma_tx_size; i++) + for (i = 0; i < priv->dma_conf.dma_tx_size; i++) stmmac_free_tx_buffer(priv, queue, i); if (tx_q->xsk_pool && tx_q->xsk_frames_done) { @@ -1830,7 +1830,7 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) */ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; /* Release the DMA RX socket buffers */ if (rx_q->xsk_pool) @@ -1843,11 +1843,11 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) /* Free DMA regions of consistent memory previously allocated */ if (!priv->extend_desc) - dma_free_coherent(priv->device, priv->dma_rx_size * + dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size * sizeof(struct dma_desc), rx_q->dma_rx, rx_q->dma_rx_phy); else - dma_free_coherent(priv->device, priv->dma_rx_size * + dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size * sizeof(struct dma_extended_desc), rx_q->dma_erx, rx_q->dma_rx_phy); @@ -1876,7 +1876,7 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv) */ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; size_t size; void *addr; @@ -1894,7 +1894,7 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) addr = tx_q->dma_tx; } - size *= priv->dma_tx_size; + size *= priv->dma_conf.dma_tx_size; dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); @@ -1923,7 +1923,7 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv) */ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; bool xdp_prog = stmmac_xdp_is_enabled(priv); struct page_pool_params pp_params = { 0 }; @@ -1935,8 +1935,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) rx_q->priv_data = priv; pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; - pp_params.pool_size = priv->dma_rx_size; - num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); + pp_params.pool_size = priv->dma_conf.dma_rx_size; + num_pages = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE); pp_params.order = ilog2(num_pages); pp_params.nid = dev_to_node(priv->device); pp_params.dev = priv->device; @@ -1951,7 +1951,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) return ret; } - rx_q->buf_pool = kcalloc(priv->dma_rx_size, + rx_q->buf_pool = kcalloc(priv->dma_conf.dma_rx_size, sizeof(*rx_q->buf_pool), 
GFP_KERNEL); if (!rx_q->buf_pool) @@ -1959,7 +1959,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) if (priv->extend_desc) { rx_q->dma_erx = dma_alloc_coherent(priv->device, - priv->dma_rx_size * + priv->dma_conf.dma_rx_size * sizeof(struct dma_extended_desc), &rx_q->dma_rx_phy, GFP_KERNEL); @@ -1968,7 +1968,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) } else { rx_q->dma_rx = dma_alloc_coherent(priv->device, - priv->dma_rx_size * + priv->dma_conf.dma_rx_size * sizeof(struct dma_desc), &rx_q->dma_rx_phy, GFP_KERNEL); @@ -2025,20 +2025,20 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) */ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; size_t size; void *addr; tx_q->queue_index = queue; tx_q->priv_data = priv; - tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, + tx_q->tx_skbuff_dma = kcalloc(priv->dma_conf.dma_tx_size, sizeof(*tx_q->tx_skbuff_dma), GFP_KERNEL); if (!tx_q->tx_skbuff_dma) return -ENOMEM; - tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, + tx_q->tx_skbuff = kcalloc(priv->dma_conf.dma_tx_size, sizeof(struct sk_buff *), GFP_KERNEL); if (!tx_q->tx_skbuff) @@ -2051,7 +2051,7 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) else size = sizeof(struct dma_desc); - size *= priv->dma_tx_size; + size *= priv->dma_conf.dma_tx_size; addr = dma_alloc_coherent(priv->device, size, &tx_q->dma_tx_phy, GFP_KERNEL); @@ -2295,7 +2295,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) /* configure all channels */ for (chan = 0; chan < rx_channels_count; chan++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; u32 buf_size; qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; @@ -2310,7 +2310,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) chan); } else { stmmac_set_dma_bfsize(priv, priv->ioaddr, - priv->dma_buf_sz, + priv->dma_conf.dma_buf_sz, chan); } } @@ -2326,7 +2326,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) { struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct xsk_buff_pool *pool = tx_q->xsk_pool; unsigned int entry = tx_q->cur_tx; struct dma_desc *tx_desc = NULL; @@ -2401,7 +2401,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) stmmac_enable_dma_transmission(priv, priv->ioaddr); - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); entry = tx_q->cur_tx; } @@ -2442,7 +2442,7 @@ static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan) */ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; unsigned int bytes_compl = 0, pkts_compl = 0; unsigned int entry, xmits = 0, count = 0; @@ -2455,7 +2455,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) entry = tx_q->dirty_tx; /* Try to clean all TX complete frame in 1 shot */ - while ((entry != tx_q->cur_tx) && 
count < priv->dma_tx_size) { + while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { struct xdp_frame *xdpf; struct sk_buff *skb; struct dma_desc *p; @@ -2557,7 +2557,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) stmmac_release_tx_desc(priv, p, priv->mode); - entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); } tx_q->dirty_tx = entry; @@ -2622,7 +2622,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) */ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); @@ -2689,8 +2689,8 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) { int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, &priv->xstats, chan, dir); - struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; - struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; struct stmmac_channel *ch = &priv->channel[chan]; struct napi_struct *rx_napi; struct napi_struct *tx_napi; @@ -2856,7 +2856,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) /* DMA RX Channel Configuration */ for (chan = 0; chan < rx_channels_count; chan++) { - rx_q = &priv->rx_queue[chan]; + rx_q = &priv->dma_conf.rx_queue[chan]; stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, rx_q->dma_rx_phy, chan); @@ -2870,7 +2870,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) /* DMA TX Channel Configuration */ for (chan = 0; chan < tx_channels_count; chan++) { - tx_q = &priv->tx_queue[chan]; + tx_q = &priv->dma_conf.tx_queue[chan]; stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, tx_q->dma_tx_phy, chan); @@ -2885,7 +2885,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), @@ -2935,7 +2935,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv) u32 chan; for (chan = 0; chan < tx_channel_count; chan++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; @@ -2957,12 +2957,12 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv) /* set TX ring length */ for (chan = 0; chan < tx_channels_count; chan++) stmmac_set_tx_ring_len(priv, priv->ioaddr, - (priv->dma_tx_size - 1), chan); + (priv->dma_conf.dma_tx_size - 1), chan); /* set RX ring length */ for (chan = 0; chan < rx_channels_count; chan++) stmmac_set_rx_ring_len(priv, priv->ioaddr, - (priv->dma_rx_size - 1), chan); + (priv->dma_conf.dma_rx_size - 1), chan); } /** @@ -3297,7 +3297,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) /* Enable TSO */ if (priv->tso) { for (chan = 0; chan < tx_cnt; chan++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; /* TSO and TBS cannot co-exist */ if (tx_q->tbs & STMMAC_TBS_AVAIL) @@ -3319,7 +3319,7 
@@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) /* TBS */ for (chan = 0; chan < tx_cnt; chan++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; int enable = tx_q->tbs & STMMAC_TBS_AVAIL; stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); @@ -3363,7 +3363,7 @@ static void stmmac_free_irq(struct net_device *dev, for (j = irq_idx - 1; j >= 0; j--) { if (priv->tx_irq[j] > 0) { irq_set_affinity_hint(priv->tx_irq[j], NULL); - free_irq(priv->tx_irq[j], &priv->tx_queue[j]); + free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); } } irq_idx = priv->plat->rx_queues_to_use; @@ -3372,7 +3372,7 @@ static void stmmac_free_irq(struct net_device *dev, for (j = irq_idx - 1; j >= 0; j--) { if (priv->rx_irq[j] > 0) { irq_set_affinity_hint(priv->rx_irq[j], NULL); - free_irq(priv->rx_irq[j], &priv->rx_queue[j]); + free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); } } @@ -3507,7 +3507,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); ret = request_irq(priv->rx_irq[i], stmmac_msi_intr_rx, - 0, int_name, &priv->rx_queue[i]); + 0, int_name, &priv->dma_conf.rx_queue[i]); if (unlikely(ret < 0)) { netdev_err(priv->dev, "%s: alloc rx-%d MSI %d (error: %d)\n", @@ -3532,7 +3532,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); ret = request_irq(priv->tx_irq[i], stmmac_msi_intr_tx, - 0, int_name, &priv->tx_queue[i]); + 0, int_name, &priv->dma_conf.tx_queue[i]); if (unlikely(ret < 0)) { netdev_err(priv->dev, "%s: alloc tx-%d MSI %d (error: %d)\n", @@ -3661,21 +3661,21 @@ static int stmmac_open(struct net_device *dev) bfsize = 0; if (bfsize < BUF_SIZE_16KiB) - bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); + bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_conf.dma_buf_sz); - priv->dma_buf_sz = bfsize; + priv->dma_conf.dma_buf_sz = bfsize; buf_sz = bfsize; priv->rx_copybreak = STMMAC_RX_COPYBREAK; - if (!priv->dma_tx_size) - priv->dma_tx_size = DMA_DEFAULT_TX_SIZE; - if (!priv->dma_rx_size) - priv->dma_rx_size = DMA_DEFAULT_RX_SIZE; + if (!priv->dma_conf.dma_tx_size) + priv->dma_conf.dma_tx_size = DMA_DEFAULT_TX_SIZE; + if (!priv->dma_conf.dma_rx_size) + priv->dma_conf.dma_rx_size = DMA_DEFAULT_RX_SIZE; /* Earlier check for TBS */ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; /* Setup per-TXQ tbs flag before TX descriptor alloc */ @@ -3724,7 +3724,7 @@ static int stmmac_open(struct net_device *dev) phylink_stop(priv->phylink); for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) - hrtimer_cancel(&priv->tx_queue[chan].txtimer); + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); stmmac_hw_teardown(dev); init_error: @@ -3766,7 +3766,7 @@ static int stmmac_release(struct net_device *dev) stmmac_disable_all_queues(priv); for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) - hrtimer_cancel(&priv->tx_queue[chan].txtimer); + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); netif_tx_disable(dev); @@ -3826,7 +3826,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, return false; stmmac_set_tx_owner(priv, p); - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, 
priv->dma_conf.dma_tx_size); return true; } @@ -3844,7 +3844,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, int total_len, bool last_segment, u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct dma_desc *desc; u32 buff_size; int tmp_len; @@ -3855,7 +3855,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, dma_addr_t curr_addr; tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, - priv->dma_tx_size); + priv->dma_conf.dma_tx_size); WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); if (tx_q->tbs & STMMAC_TBS_AVAIL) @@ -3883,7 +3883,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; int desc_size; if (likely(priv->extend_desc)) @@ -3945,7 +3945,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) dma_addr_t des; int i; - tx_q = &priv->tx_queue[queue]; + tx_q = &priv->dma_conf.tx_queue[queue]; first_tx = tx_q->cur_tx; /* Compute header lengths */ @@ -3985,7 +3985,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) stmmac_set_mss(priv, mss_desc, mss); tx_q->mss = mss; tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, - priv->dma_tx_size); + priv->dma_conf.dma_tx_size); WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); } @@ -4097,7 +4097,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) * ndo_start_xmit will fill this descriptor the next time it's * called and stmmac_tx_clean may clean up to this descriptor. */ - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", @@ -4185,7 +4185,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) int entry, first_tx; dma_addr_t des; - tx_q = &priv->tx_queue[queue]; + tx_q = &priv->dma_conf.tx_queue[queue]; first_tx = tx_q->cur_tx; if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) @@ -4248,7 +4248,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) int len = skb_frag_size(frag); bool last_segment = (i == (nfrags - 1)); - entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); WARN_ON(tx_q->tx_skbuff[entry]); if (likely(priv->extend_desc)) @@ -4319,7 +4319,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) * ndo_start_xmit will fill this descriptor the next time it's * called and stmmac_tx_clean may clean up to this descriptor. 
*/ - entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); tx_q->cur_tx = entry; if (netif_msg_pktdata(priv)) { @@ -4434,7 +4434,7 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) */ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; int dirty = stmmac_rx_dirty(priv, queue); unsigned int entry = rx_q->dirty_rx; gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); @@ -4488,7 +4488,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) dma_wmb(); stmmac_set_rx_owner(priv, p, use_rx_wd); - entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); } rx_q->dirty_rx = entry; rx_q->rx_tail_addr = rx_q->dma_rx_phy + @@ -4516,12 +4516,12 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, /* First descriptor, not last descriptor and not split header */ if (status & rx_not_ls) - return priv->dma_buf_sz; + return priv->dma_conf.dma_buf_sz; plen = stmmac_get_rx_frame_len(priv, p, coe); /* First descriptor and last descriptor and not split header */ - return min_t(unsigned int, priv->dma_buf_sz, plen); + return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); } static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, @@ -4537,7 +4537,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, /* Not last descriptor */ if (status & rx_not_ls) - return priv->dma_buf_sz; + return priv->dma_conf.dma_buf_sz; plen = stmmac_get_rx_frame_len(priv, p, coe); @@ -4548,7 +4548,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, struct xdp_frame *xdpf, bool dma_map) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; unsigned int entry = tx_q->cur_tx; struct dma_desc *tx_desc; dma_addr_t dma_addr; @@ -4611,7 +4611,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, stmmac_enable_dma_transmission(priv, priv->ioaddr); - entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); tx_q->cur_tx = entry; return STMMAC_XDP_TX; @@ -4785,7 +4785,7 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; unsigned int entry = rx_q->dirty_rx; struct dma_desc *rx_desc = NULL; bool ret = true; @@ -4828,7 +4828,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) dma_wmb(); stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); - entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); } if (rx_desc) { @@ -4843,7 +4843,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; unsigned int count = 0, error = 0, len = 0; int dirty = stmmac_rx_dirty(priv, queue); unsigned int next_entry = rx_q->cur_rx; @@ -4865,7 +4865,7 @@ static int 
stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) desc_size = sizeof(struct dma_desc); } - stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, + stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, rx_q->dma_rx_phy, desc_size); } while (count < limit) { @@ -4912,7 +4912,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) /* Prefetch the next RX descriptor */ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, - priv->dma_rx_size); + priv->dma_conf.dma_rx_size); next_entry = rx_q->cur_rx; if (priv->extend_desc) @@ -5033,7 +5033,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) */ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; unsigned int count = 0, error = 0, len = 0; int status = 0, coe = priv->hw->rx_csum; @@ -5046,7 +5046,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) int buf_sz; dma_dir = page_pool_get_dma_dir(rx_q->page_pool); - buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; + buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; if (netif_msg_rx_status(priv)) { void *rx_head; @@ -5060,7 +5060,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) desc_size = sizeof(struct dma_desc); } - stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, + stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, rx_q->dma_rx_phy, desc_size); } while (count < limit) { @@ -5104,7 +5104,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) break; rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, - priv->dma_rx_size); + priv->dma_conf.dma_rx_size); next_entry = rx_q->cur_rx; if (priv->extend_desc) @@ -5239,7 +5239,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) buf1_len, dma_dir); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buf->page, buf->page_offset, buf1_len, - priv->dma_buf_sz); + priv->dma_conf.dma_buf_sz); /* Data payload appended into SKB */ page_pool_release_page(rx_q->page_pool, buf->page); @@ -5251,7 +5251,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) buf2_len, dma_dir); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buf->sec_page, 0, buf2_len, - priv->dma_buf_sz); + priv->dma_conf.dma_buf_sz); /* Data payload appended into SKB */ page_pool_release_page(rx_q->page_pool, buf->sec_page); @@ -5693,11 +5693,13 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) { struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; + struct stmmac_dma_conf *dma_conf; int chan = tx_q->queue_index; struct stmmac_priv *priv; int status; - priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]); + dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]); + priv = container_of(dma_conf, struct stmmac_priv, dma_conf); if (unlikely(!data)) { netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); @@ -5723,10 +5725,12 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) { struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; + struct stmmac_dma_conf *dma_conf; int chan = rx_q->queue_index; struct stmmac_priv *priv; - priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]); + dma_conf = 
container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]); + priv = container_of(dma_conf, struct stmmac_priv, dma_conf); if (unlikely(!data)) { netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); @@ -5757,10 +5761,10 @@ static void stmmac_poll_controller(struct net_device *dev) if (priv->plat->multi_msi_en) { for (i = 0; i < priv->plat->rx_queues_to_use; i++) - stmmac_msi_intr_rx(0, &priv->rx_queue[i]); + stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]); for (i = 0; i < priv->plat->tx_queues_to_use; i++) - stmmac_msi_intr_tx(0, &priv->tx_queue[i]); + stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]); } else { disable_irq(dev->irq); stmmac_interrupt(dev->irq, dev); @@ -5939,34 +5943,34 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v) return 0; for (queue = 0; queue < rx_count; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; seq_printf(seq, "RX Queue %d:\n", queue); if (priv->extend_desc) { seq_printf(seq, "Extended descriptor ring:\n"); sysfs_display_ring((void *)rx_q->dma_erx, - priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy); + priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy); } else { seq_printf(seq, "Descriptor ring:\n"); sysfs_display_ring((void *)rx_q->dma_rx, - priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy); + priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy); } } for (queue = 0; queue < tx_count; queue++) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; seq_printf(seq, "TX Queue %d:\n", queue); if (priv->extend_desc) { seq_printf(seq, "Extended descriptor ring:\n"); sysfs_display_ring((void *)tx_q->dma_etx, - priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy); + priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy); } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { seq_printf(seq, "Descriptor ring:\n"); sysfs_display_ring((void *)tx_q->dma_tx, - priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy); + priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy); } } @@ -6305,7 +6309,7 @@ void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; unsigned long flags; u32 buf_size; @@ -6342,7 +6346,7 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) rx_q->queue_index); } else { stmmac_set_dma_bfsize(priv, priv->ioaddr, - priv->dma_buf_sz, + priv->dma_conf.dma_buf_sz, rx_q->queue_index); } @@ -6368,7 +6372,7 @@ void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; unsigned long flags; int ret; @@ -6415,7 +6419,7 @@ void stmmac_xdp_release(struct net_device *dev) stmmac_disable_all_queues(priv); for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) - hrtimer_cancel(&priv->tx_queue[chan].txtimer); + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); /* Free the IRQ lines */ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); @@ -6474,7 +6478,7 @@ int stmmac_xdp_open(struct net_device *dev) /* DMA RX Channel Configuration */ for (chan = 0; chan < rx_cnt; chan++) { - rx_q = &priv->rx_queue[chan]; + rx_q = 
&priv->dma_conf.rx_queue[chan]; stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, rx_q->dma_rx_phy, chan); @@ -6492,7 +6496,7 @@ int stmmac_xdp_open(struct net_device *dev) rx_q->queue_index); } else { stmmac_set_dma_bfsize(priv, priv->ioaddr, - priv->dma_buf_sz, + priv->dma_conf.dma_buf_sz, rx_q->queue_index); } @@ -6501,7 +6505,7 @@ int stmmac_xdp_open(struct net_device *dev) /* DMA TX Channel Configuration */ for (chan = 0; chan < tx_cnt; chan++) { - tx_q = &priv->tx_queue[chan]; + tx_q = &priv->dma_conf.tx_queue[chan]; stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, tx_q->dma_tx_phy, chan); @@ -6534,7 +6538,7 @@ int stmmac_xdp_open(struct net_device *dev) irq_error: for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) - hrtimer_cancel(&priv->tx_queue[chan].txtimer); + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); stmmac_hw_teardown(dev); init_error: @@ -6561,8 +6565,8 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) queue >= priv->plat->tx_queues_to_use) return -EINVAL; - rx_q = &priv->rx_queue[queue]; - tx_q = &priv->tx_queue[queue]; + rx_q = &priv->dma_conf.rx_queue[queue]; + tx_q = &priv->dma_conf.tx_queue[queue]; ch = &priv->channel[queue]; if (!rx_q->xsk_pool && !tx_q->xsk_pool) @@ -6817,8 +6821,8 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) if (netif_running(dev)) stmmac_release(dev); - priv->dma_rx_size = rx_size; - priv->dma_tx_size = tx_size; + priv->dma_conf.dma_rx_size = rx_size; + priv->dma_conf.dma_tx_size = tx_size; if (netif_running(dev)) ret = stmmac_open(dev); @@ -7265,7 +7269,7 @@ int stmmac_suspend(struct device *dev) stmmac_disable_all_queues(priv); for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) - hrtimer_cancel(&priv->tx_queue[chan].txtimer); + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); if (priv->eee_enabled) { priv->tx_path_in_lpi_mode = false; @@ -7316,7 +7320,7 @@ EXPORT_SYMBOL_GPL(stmmac_suspend); static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; rx_q->cur_rx = 0; rx_q->dirty_rx = 0; @@ -7324,7 +7328,7 @@ static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; tx_q->cur_tx = 0; tx_q->dirty_tx = 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index 2fc51dc5eb0bb..49af7e78b7f59 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -795,8 +795,8 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv) struct stmmac_channel *ch = &priv->channel[i]; u32 tail; - tail = priv->rx_queue[i].dma_rx_phy + - (priv->dma_rx_size * sizeof(struct dma_desc)); + tail = priv->dma_conf.rx_queue[i].dma_rx_phy + + (priv->dma_conf.dma_rx_size * sizeof(struct dma_desc)); stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i); stmmac_start_rx(priv, priv->ioaddr, i); @@ -1680,7 +1680,7 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv) static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue) { struct stmmac_packet_attrs attr = { }; - int size = priv->dma_buf_sz; + int size = priv->dma_conf.dma_buf_sz; attr.dst = priv->dev->dev_addr; 
attr.max_size = size - ETH_FCS_LEN; @@ -1763,7 +1763,7 @@ static int stmmac_test_tbs(struct stmmac_priv *priv) /* Find first TBS enabled Queue, if any */ for (i = 0; i < priv->plat->tx_queues_to_use; i++) - if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL) + if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_AVAIL) break; if (i >= priv->plat->tx_queues_to_use) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index d61766eeac6d7..773e415cc2de6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -1091,13 +1091,13 @@ static int tc_setup_etf(struct stmmac_priv *priv, return -EOPNOTSUPP; if (qopt->queue >= priv->plat->tx_queues_to_use) return -EINVAL; - if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL)) + if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL)) return -EINVAL; if (qopt->enable) - priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN; + priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN; else - priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN; + priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN; netdev_info(priv->dev, "%s ETF for Queue %d\n", qopt->enable ? "enabled" : "disabled", qopt->queue); From ba39b344e9240a4a5fd4ab8178200b85cd1809da Mon Sep 17 00:00:00 2001 From: Christian Marangi Date: Sat, 23 Jul 2022 16:29:32 +0200 Subject: [PATCH 4/5] net: ethernet: stmicro: stmmac: generate stmmac dma conf before open Rework the driver to generate the stmmac dma_conf before stmmac_open. This permits a function to first check if it's possible to allocate a new dma_config and then pass it directly to __stmmac_open and "open" the interface with the new configuration. Signed-off-by: Christian Marangi Signed-off-by: Jakub Kicinski --- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 462 +++++++++++------- 1 file changed, 289 insertions(+), 173 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 552d4b23cdb38..81bbc8a1eb2f7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1223,7 +1223,8 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) return 0; } -static void stmmac_display_rx_rings(struct stmmac_priv *priv) +static void stmmac_display_rx_rings(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) { u32 rx_cnt = priv->plat->rx_queues_to_use; unsigned int desc_size; @@ -1232,7 +1233,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv) /* Display RX rings */ for (queue = 0; queue < rx_cnt; queue++) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; pr_info("\tRX Queue %u rings\n", queue); @@ -1245,12 +1246,13 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv) } /* Display RX ring */ - stmmac_display_ring(priv, head_rx, priv->dma_conf.dma_rx_size, true, + stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true, rx_q->dma_rx_phy, desc_size); } } -static void stmmac_display_tx_rings(struct stmmac_priv *priv) +static void stmmac_display_tx_rings(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) { u32 tx_cnt = priv->plat->tx_queues_to_use; unsigned int desc_size; @@ -1259,7 +1261,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv) /* Display TX rings */ for (queue = 0; queue < tx_cnt; queue++) { - struct stmmac_tx_queue *tx_q = 
&priv->dma_conf.tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; pr_info("\tTX Queue %d rings\n", queue); @@ -1274,18 +1276,19 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv) desc_size = sizeof(struct dma_desc); } - stmmac_display_ring(priv, head_tx, priv->dma_conf.dma_tx_size, false, + stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false, tx_q->dma_tx_phy, desc_size); } } -static void stmmac_display_rings(struct stmmac_priv *priv) +static void stmmac_display_rings(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) { /* Display RX ring */ - stmmac_display_rx_rings(priv); + stmmac_display_rx_rings(priv, dma_conf); /* Display TX ring */ - stmmac_display_tx_rings(priv); + stmmac_display_tx_rings(priv, dma_conf); } static int stmmac_set_bfsize(int mtu, int bufsize) @@ -1309,44 +1312,50 @@ static int stmmac_set_bfsize(int mtu, int bufsize) /** * stmmac_clear_rx_descriptors - clear RX descriptors * @priv: driver private structure + * @dma_conf: structure to take the dma data * @queue: RX queue index * Description: this function is called to clear the RX descriptors * in case of both basic and extended descriptors are used. */ -static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) +static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; int i; /* Clear the RX descriptors */ - for (i = 0; i < priv->dma_conf.dma_rx_size; i++) + for (i = 0; i < dma_conf->dma_rx_size; i++) if (priv->extend_desc) stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, priv->use_riwt, priv->mode, - (i == priv->dma_conf.dma_rx_size - 1), - priv->dma_conf.dma_buf_sz); + (i == dma_conf->dma_rx_size - 1), + dma_conf->dma_buf_sz); else stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], priv->use_riwt, priv->mode, - (i == priv->dma_conf.dma_rx_size - 1), - priv->dma_conf.dma_buf_sz); + (i == dma_conf->dma_rx_size - 1), + dma_conf->dma_buf_sz); } /** * stmmac_clear_tx_descriptors - clear tx descriptors * @priv: driver private structure + * @dma_conf: structure to take the dma data * @queue: TX queue index. * Description: this function is called to clear the TX descriptors * in case of both basic and extended descriptors are used. */ -static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) +static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; int i; /* Clear the TX descriptors */ - for (i = 0; i < priv->dma_conf.dma_tx_size; i++) { - int last = (i == (priv->dma_conf.dma_tx_size - 1)); + for (i = 0; i < dma_conf->dma_tx_size; i++) { + int last = (i == (dma_conf->dma_tx_size - 1)); struct dma_desc *p; if (priv->extend_desc) @@ -1363,10 +1372,12 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) /** * stmmac_clear_descriptors - clear descriptors * @priv: driver private structure + * @dma_conf: structure to take the dma data * Description: this function is called to clear the TX and RX descriptors * in case of both basic and extended descriptors are used. 
*/ -static void stmmac_clear_descriptors(struct stmmac_priv *priv) +static void stmmac_clear_descriptors(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) { u32 rx_queue_cnt = priv->plat->rx_queues_to_use; u32 tx_queue_cnt = priv->plat->tx_queues_to_use; @@ -1374,16 +1385,17 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv) /* Clear the RX descriptors */ for (queue = 0; queue < rx_queue_cnt; queue++) - stmmac_clear_rx_descriptors(priv, queue); + stmmac_clear_rx_descriptors(priv, dma_conf, queue); /* Clear the TX descriptors */ for (queue = 0; queue < tx_queue_cnt; queue++) - stmmac_clear_tx_descriptors(priv, queue); + stmmac_clear_tx_descriptors(priv, dma_conf, queue); } /** * stmmac_init_rx_buffers - init the RX descriptor buffer. * @priv: driver private structure + * @dma_conf: structure to take the dma data * @p: descriptor pointer * @i: descriptor index * @flags: gfp flag @@ -1391,10 +1403,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv) * Description: this function is called to allocate a receive buffer, perform * the DMA mapping and init the descriptor. */ -static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, +static int stmmac_init_rx_buffers(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + struct dma_desc *p, int i, gfp_t flags, u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); @@ -1423,7 +1437,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; stmmac_set_desc_addr(priv, p, buf->addr); - if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB) + if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB) stmmac_init_desc3(priv, p); return 0; @@ -1432,12 +1446,13 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, /** * stmmac_free_rx_buffer - free RX dma buffers * @priv: private structure - * @queue: RX queue index + * @rx_q: RX queue * @i: buffer index. */ -static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i) +static void stmmac_free_rx_buffer(struct stmmac_priv *priv, + struct stmmac_rx_queue *rx_q, + int i) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; if (buf->page) @@ -1452,12 +1467,15 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i) /** * stmmac_free_tx_buffer - free RX dma buffers * @priv: private structure + * @dma_conf: structure to take the dma data * @queue: RX queue index * @i: buffer index. 
*/ -static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) +static void stmmac_free_tx_buffer(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue, int i) { - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; if (tx_q->tx_skbuff_dma[i].buf && tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { @@ -1496,23 +1514,28 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) /** * dma_free_rx_skbufs - free RX dma buffers * @priv: private structure + * @dma_conf: structure to take the dma data * @queue: RX queue index */ -static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) +static void dma_free_rx_skbufs(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) { + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; int i; - for (i = 0; i < priv->dma_conf.dma_rx_size; i++) - stmmac_free_rx_buffer(priv, queue, i); + for (i = 0; i < dma_conf->dma_rx_size; i++) + stmmac_free_rx_buffer(priv, rx_q, i); } -static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue, - gfp_t flags) +static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue, gfp_t flags) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; int i; - for (i = 0; i < priv->dma_conf.dma_rx_size; i++) { + for (i = 0; i < dma_conf->dma_rx_size; i++) { struct dma_desc *p; int ret; @@ -1521,7 +1544,7 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue, else p = rx_q->dma_rx + i; - ret = stmmac_init_rx_buffers(priv, p, i, flags, + ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags, queue); if (ret) return ret; @@ -1535,14 +1558,17 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue, /** * dma_free_rx_xskbufs - free RX dma buffers from XSK pool * @priv: private structure + * @dma_conf: structure to take the dma data * @queue: RX queue index */ -static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue) +static void dma_free_rx_xskbufs(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; int i; - for (i = 0; i < priv->dma_conf.dma_rx_size; i++) { + for (i = 0; i < dma_conf->dma_rx_size; i++) { struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; if (!buf->xdp) @@ -1553,12 +1579,14 @@ static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue) } } -static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue) +static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; int i; - for (i = 0; i < priv->dma_conf.dma_rx_size; i++) { + for (i = 0; i < dma_conf->dma_rx_size; i++) { struct stmmac_rx_buffer *buf; dma_addr_t dma_addr; struct dma_desc *p; @@ -1593,22 +1621,25 @@ static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 q /** * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue) * @priv: driver private structure + * @dma_conf: structure to take the dma data * @queue: RX queue index * @flags: gfp flag. 
* Description: this function initializes the DMA RX descriptors * and allocates the socket buffers. It supports the chained and ring * modes. */ -static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags) +static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue, gfp_t flags) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; int ret; netif_dbg(priv, probe, priv->dev, "(%s) dma_rx_phy=0x%08x\n", __func__, (u32)rx_q->dma_rx_phy); - stmmac_clear_rx_descriptors(priv, queue); + stmmac_clear_rx_descriptors(priv, dma_conf, queue); xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); @@ -1635,9 +1666,9 @@ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t f /* RX XDP ZC buffer pool may not be populated, e.g. * xdpsock TX-only. */ - stmmac_alloc_rx_buffers_zc(priv, queue); + stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue); } else { - ret = stmmac_alloc_rx_buffers(priv, queue, flags); + ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags); if (ret < 0) return -ENOMEM; } @@ -1647,17 +1678,19 @@ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t f if (priv->extend_desc) stmmac_mode_init(priv, rx_q->dma_erx, rx_q->dma_rx_phy, - priv->dma_conf.dma_rx_size, 1); + dma_conf->dma_rx_size, 1); else stmmac_mode_init(priv, rx_q->dma_rx, rx_q->dma_rx_phy, - priv->dma_conf.dma_rx_size, 0); + dma_conf->dma_rx_size, 0); } return 0; } -static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) +static int init_dma_rx_desc_rings(struct net_device *dev, + struct stmmac_dma_conf *dma_conf, + gfp_t flags) { struct stmmac_priv *priv = netdev_priv(dev); u32 rx_count = priv->plat->rx_queues_to_use; @@ -1669,7 +1702,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) "SKB addresses:\nskb\t\tskb data\tdma data\n"); for (queue = 0; queue < rx_count; queue++) { - ret = __init_dma_rx_desc_rings(priv, queue, flags); + ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags); if (ret) goto err_init_rx_buffers; } @@ -1678,12 +1711,12 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) err_init_rx_buffers: while (queue >= 0) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; if (rx_q->xsk_pool) - dma_free_rx_xskbufs(priv, queue); + dma_free_rx_xskbufs(priv, dma_conf, queue); else - dma_free_rx_skbufs(priv, queue); + dma_free_rx_skbufs(priv, dma_conf, queue); rx_q->buf_alloc_num = 0; rx_q->xsk_pool = NULL; @@ -1697,14 +1730,17 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) /** * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue) * @priv: driver private structure - * @queue : TX queue index + * @dma_conf: structure to take the dma data + * @queue: TX queue index * Description: this function initializes the DMA TX descriptors * and allocates the socket buffers. It supports the chained and ring * modes. 
*/ -static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) +static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; int i; netif_dbg(priv, probe, priv->dev, @@ -1716,16 +1752,16 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) if (priv->extend_desc) stmmac_mode_init(priv, tx_q->dma_etx, tx_q->dma_tx_phy, - priv->dma_conf.dma_tx_size, 1); + dma_conf->dma_tx_size, 1); else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) stmmac_mode_init(priv, tx_q->dma_tx, tx_q->dma_tx_phy, - priv->dma_conf.dma_tx_size, 0); + dma_conf->dma_tx_size, 0); } tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); - for (i = 0; i < priv->dma_conf.dma_tx_size; i++) { + for (i = 0; i < dma_conf->dma_tx_size; i++) { struct dma_desc *p; if (priv->extend_desc) @@ -1747,7 +1783,8 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) return 0; } -static int init_dma_tx_desc_rings(struct net_device *dev) +static int init_dma_tx_desc_rings(struct net_device *dev, + struct stmmac_dma_conf *dma_conf) { struct stmmac_priv *priv = netdev_priv(dev); u32 tx_queue_cnt; @@ -1756,7 +1793,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev) tx_queue_cnt = priv->plat->tx_queues_to_use; for (queue = 0; queue < tx_queue_cnt; queue++) - __init_dma_tx_desc_rings(priv, queue); + __init_dma_tx_desc_rings(priv, dma_conf, queue); return 0; } @@ -1764,26 +1801,29 @@ static int init_dma_tx_desc_rings(struct net_device *dev) /** * init_dma_desc_rings - init the RX/TX descriptor rings * @dev: net device structure + * @dma_conf: structure to take the dma data * @flags: gfp flag. * Description: this function initializes the DMA RX/TX descriptors * and allocates the socket buffers. It supports the chained and ring * modes. 
*/ -static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) +static int init_dma_desc_rings(struct net_device *dev, + struct stmmac_dma_conf *dma_conf, + gfp_t flags) { struct stmmac_priv *priv = netdev_priv(dev); int ret; - ret = init_dma_rx_desc_rings(dev, flags); + ret = init_dma_rx_desc_rings(dev, dma_conf, flags); if (ret) return ret; - ret = init_dma_tx_desc_rings(dev); + ret = init_dma_tx_desc_rings(dev, dma_conf); - stmmac_clear_descriptors(priv); + stmmac_clear_descriptors(priv, dma_conf); if (netif_msg_hw(priv)) - stmmac_display_rings(priv); + stmmac_display_rings(priv, dma_conf); return ret; } @@ -1791,17 +1831,20 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) /** * dma_free_tx_skbufs - free TX dma buffers * @priv: private structure + * @dma_conf: structure to take the dma data * @queue: TX queue index */ -static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) +static void dma_free_tx_skbufs(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; int i; tx_q->xsk_frames_done = 0; - for (i = 0; i < priv->dma_conf.dma_tx_size; i++) - stmmac_free_tx_buffer(priv, queue, i); + for (i = 0; i < dma_conf->dma_tx_size; i++) + stmmac_free_tx_buffer(priv, dma_conf, queue, i); if (tx_q->xsk_pool && tx_q->xsk_frames_done) { xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); @@ -1820,34 +1863,37 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) u32 queue; for (queue = 0; queue < tx_queue_cnt; queue++) - dma_free_tx_skbufs(priv, queue); + dma_free_tx_skbufs(priv, &priv->dma_conf, queue); } /** * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) * @priv: private structure + * @dma_conf: structure to take the dma data * @queue: RX queue index */ -static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) +static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; /* Release the DMA RX socket buffers */ if (rx_q->xsk_pool) - dma_free_rx_xskbufs(priv, queue); + dma_free_rx_xskbufs(priv, dma_conf, queue); else - dma_free_rx_skbufs(priv, queue); + dma_free_rx_skbufs(priv, dma_conf, queue); rx_q->buf_alloc_num = 0; rx_q->xsk_pool = NULL; /* Free DMA regions of consistent memory previously allocated */ if (!priv->extend_desc) - dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size * + dma_free_coherent(priv->device, dma_conf->dma_rx_size * sizeof(struct dma_desc), rx_q->dma_rx, rx_q->dma_rx_phy); else - dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size * + dma_free_coherent(priv->device, dma_conf->dma_rx_size * sizeof(struct dma_extended_desc), rx_q->dma_erx, rx_q->dma_rx_phy); @@ -1859,29 +1905,33 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) page_pool_destroy(rx_q->page_pool); } -static void free_dma_rx_desc_resources(struct stmmac_priv *priv) +static void free_dma_rx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) { u32 rx_count = priv->plat->rx_queues_to_use; u32 queue; /* Free RX queue resources */ for (queue = 0; queue < rx_count; queue++) - __free_dma_rx_desc_resources(priv, queue); + __free_dma_rx_desc_resources(priv, dma_conf, queue); } /** * __free_dma_tx_desc_resources - free TX 
dma desc resources (per queue) * @priv: private structure + * @dma_conf: structure to take the dma data * @queue: TX queue index */ -static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) +static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; size_t size; void *addr; /* Release the DMA TX socket buffers */ - dma_free_tx_skbufs(priv, queue); + dma_free_tx_skbufs(priv, dma_conf, queue); if (priv->extend_desc) { size = sizeof(struct dma_extended_desc); @@ -1894,7 +1944,7 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) addr = tx_q->dma_tx; } - size *= priv->dma_conf.dma_tx_size; + size *= dma_conf->dma_tx_size; dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); @@ -1902,28 +1952,32 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) kfree(tx_q->tx_skbuff); } -static void free_dma_tx_desc_resources(struct stmmac_priv *priv) +static void free_dma_tx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) { u32 tx_count = priv->plat->tx_queues_to_use; u32 queue; /* Free TX queue resources */ for (queue = 0; queue < tx_count; queue++) - __free_dma_tx_desc_resources(priv, queue); + __free_dma_tx_desc_resources(priv, dma_conf, queue); } /** * __alloc_dma_rx_desc_resources - alloc RX resources (per queue). * @priv: private structure + * @dma_conf: structure to take the dma data * @queue: RX queue index * Description: according to which descriptor can be used (extend or basic) * this function allocates the resources for TX and RX paths. In case of * reception, for example, it pre-allocated the RX socket buffer in order to * allow zero-copy mechanism. 
*/ -static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) +static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) { - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; bool xdp_prog = stmmac_xdp_is_enabled(priv); struct page_pool_params pp_params = { 0 }; @@ -1935,8 +1989,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) rx_q->priv_data = priv; pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; - pp_params.pool_size = priv->dma_conf.dma_rx_size; - num_pages = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE); + pp_params.pool_size = dma_conf->dma_rx_size; + num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); pp_params.order = ilog2(num_pages); pp_params.nid = dev_to_node(priv->device); pp_params.dev = priv->device; @@ -1951,7 +2005,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) return ret; } - rx_q->buf_pool = kcalloc(priv->dma_conf.dma_rx_size, + rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size, sizeof(*rx_q->buf_pool), GFP_KERNEL); if (!rx_q->buf_pool) @@ -1959,7 +2013,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) if (priv->extend_desc) { rx_q->dma_erx = dma_alloc_coherent(priv->device, - priv->dma_conf.dma_rx_size * + dma_conf->dma_rx_size * sizeof(struct dma_extended_desc), &rx_q->dma_rx_phy, GFP_KERNEL); @@ -1968,7 +2022,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) } else { rx_q->dma_rx = dma_alloc_coherent(priv->device, - priv->dma_conf.dma_rx_size * + dma_conf->dma_rx_size * sizeof(struct dma_desc), &rx_q->dma_rx_phy, GFP_KERNEL); @@ -1993,7 +2047,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue) return 0; } -static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) +static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) { u32 rx_count = priv->plat->rx_queues_to_use; u32 queue; @@ -2001,7 +2056,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) /* RX queues buffers and DMA */ for (queue = 0; queue < rx_count; queue++) { - ret = __alloc_dma_rx_desc_resources(priv, queue); + ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue); if (ret) goto err_dma; } @@ -2009,7 +2064,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) return 0; err_dma: - free_dma_rx_desc_resources(priv); + free_dma_rx_desc_resources(priv, dma_conf); return ret; } @@ -2017,28 +2072,31 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) /** * __alloc_dma_tx_desc_resources - alloc TX resources (per queue). * @priv: private structure + * @dma_conf: structure to take the dma data * @queue: TX queue index * Description: according to which descriptor can be used (extend or basic) * this function allocates the resources for TX and RX paths. In case of * reception, for example, it pre-allocated the RX socket buffer in order to * allow zero-copy mechanism. 
*/ -static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) +static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf, + u32 queue) { - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; size_t size; void *addr; tx_q->queue_index = queue; tx_q->priv_data = priv; - tx_q->tx_skbuff_dma = kcalloc(priv->dma_conf.dma_tx_size, + tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, sizeof(*tx_q->tx_skbuff_dma), GFP_KERNEL); if (!tx_q->tx_skbuff_dma) return -ENOMEM; - tx_q->tx_skbuff = kcalloc(priv->dma_conf.dma_tx_size, + tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, sizeof(struct sk_buff *), GFP_KERNEL); if (!tx_q->tx_skbuff) @@ -2051,7 +2109,7 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) else size = sizeof(struct dma_desc); - size *= priv->dma_conf.dma_tx_size; + size *= dma_conf->dma_tx_size; addr = dma_alloc_coherent(priv->device, size, &tx_q->dma_tx_phy, GFP_KERNEL); @@ -2068,7 +2126,8 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue) return 0; } -static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) +static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) { u32 tx_count = priv->plat->tx_queues_to_use; u32 queue; @@ -2076,7 +2135,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) /* TX queues buffers and DMA */ for (queue = 0; queue < tx_count; queue++) { - ret = __alloc_dma_tx_desc_resources(priv, queue); + ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue); if (ret) goto err_dma; } @@ -2084,27 +2143,29 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) return 0; err_dma: - free_dma_tx_desc_resources(priv); + free_dma_tx_desc_resources(priv, dma_conf); return ret; } /** * alloc_dma_desc_resources - alloc TX/RX resources. * @priv: private structure + * @dma_conf: structure to take the dma data * Description: according to which descriptor can be used (extend or basic) * this function allocates the resources for TX and RX paths. In case of * reception, for example, it pre-allocated the RX socket buffer in order to * allow zero-copy mechanism. */ -static int alloc_dma_desc_resources(struct stmmac_priv *priv) +static int alloc_dma_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) { /* RX Allocation */ - int ret = alloc_dma_rx_desc_resources(priv); + int ret = alloc_dma_rx_desc_resources(priv, dma_conf); if (ret) return ret; - ret = alloc_dma_tx_desc_resources(priv); + ret = alloc_dma_tx_desc_resources(priv, dma_conf); return ret; } @@ -2112,16 +2173,18 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv) /** * free_dma_desc_resources - free dma desc resources * @priv: private structure + * @dma_conf: structure to take the dma data */ -static void free_dma_desc_resources(struct stmmac_priv *priv) +static void free_dma_desc_resources(struct stmmac_priv *priv, + struct stmmac_dma_conf *dma_conf) { /* Release the DMA TX socket buffers */ - free_dma_tx_desc_resources(priv); + free_dma_tx_desc_resources(priv, dma_conf); /* Release the DMA RX socket buffers later * to ensure all pending XDP_TX buffers are returned. 
*/ - free_dma_rx_desc_resources(priv); + free_dma_rx_desc_resources(priv, dma_conf); } /** @@ -2627,8 +2690,8 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); stmmac_stop_tx_dma(priv, chan); - dma_free_tx_skbufs(priv, chan); - stmmac_clear_tx_descriptors(priv, chan); + dma_free_tx_skbufs(priv, &priv->dma_conf, chan); + stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); stmmac_reset_tx_queue(priv, chan); stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, tx_q->dma_tx_phy, chan); @@ -3619,19 +3682,93 @@ static int stmmac_request_irq(struct net_device *dev) } /** - * stmmac_open - open entry point of the driver + * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue + * @priv: driver private structure + * @mtu: MTU to setup the dma queue and buf with + * Description: Allocate and generate a dma_conf based on the provided MTU. + * Allocate the Tx/Rx DMA queue and init them. + * Return value: + * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure. + */ +static struct stmmac_dma_conf * +stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu) +{ + struct stmmac_dma_conf *dma_conf; + int chan, bfsize, ret; + + dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL); + if (!dma_conf) { + netdev_err(priv->dev, "%s: DMA conf allocation failed\n", + __func__); + return ERR_PTR(-ENOMEM); + } + + bfsize = stmmac_set_16kib_bfsize(priv, mtu); + if (bfsize < 0) + bfsize = 0; + + if (bfsize < BUF_SIZE_16KiB) + bfsize = stmmac_set_bfsize(mtu, 0); + + dma_conf->dma_buf_sz = bfsize; + /* Chose the tx/rx size from the already defined one in the + * priv struct. (if defined) + */ + dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; + dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; + + if (!dma_conf->dma_tx_size) + dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; + if (!dma_conf->dma_rx_size) + dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; + + /* Earlier check for TBS */ + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; + int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; + + /* Setup per-TXQ tbs flag before TX descriptor alloc */ + tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; + } + + ret = alloc_dma_desc_resources(priv, dma_conf); + if (ret < 0) { + netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", + __func__); + goto alloc_error; + } + + ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); + if (ret < 0) { + netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", + __func__); + goto init_error; + } + + return dma_conf; + +init_error: + free_dma_desc_resources(priv, dma_conf); +alloc_error: + kfree(dma_conf); + return ERR_PTR(ret); +} + +/** + * __stmmac_open - open entry point of the driver * @dev : pointer to the device structure. + * @dma_conf : structure to take the dma data * Description: * This function is the open entry point of the driver. * Return value: * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure. 
*/ -static int stmmac_open(struct net_device *dev) +static int __stmmac_open(struct net_device *dev, + struct stmmac_dma_conf *dma_conf) { struct stmmac_priv *priv = netdev_priv(dev); int mode = priv->plat->phy_interface; - int bfsize = 0; u32 chan; int ret; @@ -3656,45 +3793,10 @@ static int stmmac_open(struct net_device *dev) memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); priv->xstats.threshold = tc; - bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); - if (bfsize < 0) - bfsize = 0; - - if (bfsize < BUF_SIZE_16KiB) - bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_conf.dma_buf_sz); - - priv->dma_conf.dma_buf_sz = bfsize; - buf_sz = bfsize; - priv->rx_copybreak = STMMAC_RX_COPYBREAK; - if (!priv->dma_conf.dma_tx_size) - priv->dma_conf.dma_tx_size = DMA_DEFAULT_TX_SIZE; - if (!priv->dma_conf.dma_rx_size) - priv->dma_conf.dma_rx_size = DMA_DEFAULT_RX_SIZE; - - /* Earlier check for TBS */ - for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; - int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; - - /* Setup per-TXQ tbs flag before TX descriptor alloc */ - tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; - } - - ret = alloc_dma_desc_resources(priv); - if (ret < 0) { - netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", - __func__); - goto dma_desc_error; - } - - ret = init_dma_desc_rings(dev, GFP_KERNEL); - if (ret < 0) { - netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", - __func__); - goto init_error; - } + buf_sz = dma_conf->dma_buf_sz; + memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); stmmac_reset_queues_param(priv); @@ -3728,14 +3830,28 @@ static int stmmac_open(struct net_device *dev) stmmac_hw_teardown(dev); init_error: - free_dma_desc_resources(priv); -dma_desc_error: + free_dma_desc_resources(priv, &priv->dma_conf); phylink_disconnect_phy(priv->phylink); init_phy_error: pm_runtime_put(priv->device); return ret; } +static int stmmac_open(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct stmmac_dma_conf *dma_conf; + int ret; + + dma_conf = stmmac_setup_dma_desc(priv, dev->mtu); + if (IS_ERR(dma_conf)) + return PTR_ERR(dma_conf); + + ret = __stmmac_open(dev, dma_conf); + kfree(dma_conf); + return ret; +} + static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) { set_bit(__FPE_REMOVING, &priv->fpe_task_state); @@ -3782,7 +3898,7 @@ static int stmmac_release(struct net_device *dev) stmmac_stop_all_dma(priv); /* Release and free the Rx/Tx resources */ - free_dma_desc_resources(priv); + free_dma_desc_resources(priv, &priv->dma_conf); /* Disable the MAC Rx/Tx */ stmmac_mac_set(priv, priv->ioaddr, false); @@ -6304,7 +6420,7 @@ void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) spin_unlock_irqrestore(&ch->lock, flags); stmmac_stop_rx_dma(priv, queue); - __free_dma_rx_desc_resources(priv, queue); + __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); } void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) @@ -6315,21 +6431,21 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) u32 buf_size; int ret; - ret = __alloc_dma_rx_desc_resources(priv, queue); + ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); if (ret) { netdev_err(priv->dev, "Failed to alloc RX desc.\n"); return; } - ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL); + ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); if (ret) { - __free_dma_rx_desc_resources(priv, queue); + 
__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); netdev_err(priv->dev, "Failed to init RX desc.\n"); return; } stmmac_reset_rx_queue(priv, queue); - stmmac_clear_rx_descriptors(priv, queue); + stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, rx_q->dma_rx_phy, rx_q->queue_index); @@ -6367,7 +6483,7 @@ void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) spin_unlock_irqrestore(&ch->lock, flags); stmmac_stop_tx_dma(priv, queue); - __free_dma_tx_desc_resources(priv, queue); + __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); } void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) { @@ -6377,21 +6493,21 @@ void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) unsigned long flags; int ret; - ret = __alloc_dma_tx_desc_resources(priv, queue); + ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); if (ret) { netdev_err(priv->dev, "Failed to alloc TX desc.\n"); return; } - ret = __init_dma_tx_desc_rings(priv, queue); + ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); if (ret) { - __free_dma_tx_desc_resources(priv, queue); + __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); netdev_err(priv->dev, "Failed to init TX desc.\n"); return; } stmmac_reset_tx_queue(priv, queue); - stmmac_clear_tx_descriptors(priv, queue); + stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, tx_q->dma_tx_phy, tx_q->queue_index); @@ -6428,7 +6544,7 @@ void stmmac_xdp_release(struct net_device *dev) stmmac_stop_all_dma(priv); /* Release and free the Rx/Tx resources */ - free_dma_desc_resources(priv); + free_dma_desc_resources(priv, &priv->dma_conf); /* Disable the MAC Rx/Tx */ stmmac_mac_set(priv, priv->ioaddr, false); @@ -6453,14 +6569,14 @@ int stmmac_xdp_open(struct net_device *dev) u32 chan; int ret; - ret = alloc_dma_desc_resources(priv); + ret = alloc_dma_desc_resources(priv, &priv->dma_conf); if (ret < 0) { netdev_err(dev, "%s: DMA descriptors allocation failed\n", __func__); goto dma_desc_error; } - ret = init_dma_desc_rings(dev, GFP_KERNEL); + ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); if (ret < 0) { netdev_err(dev, "%s: DMA descriptors initialization failed\n", __func__); @@ -6542,7 +6658,7 @@ int stmmac_xdp_open(struct net_device *dev) stmmac_hw_teardown(dev); init_error: - free_dma_desc_resources(priv); + free_dma_desc_resources(priv, &priv->dma_conf); dma_desc_error: return ret; } @@ -7411,7 +7527,7 @@ int stmmac_resume(struct device *dev) stmmac_reset_queues_param(priv); stmmac_free_tx_skbufs(priv); - stmmac_clear_descriptors(priv); + stmmac_clear_descriptors(priv, &priv->dma_conf); stmmac_hw_setup(ndev, false); stmmac_init_coalesce(priv); From 3470079687448abac42deb62774253be1d6bdef3 Mon Sep 17 00:00:00 2001 From: Christian Marangi Date: Sat, 23 Jul 2022 16:29:33 +0200 Subject: [PATCH 5/5] net: ethernet: stmicro: stmmac: permit MTU change with interface up Remove the limitation where the interface needs to be down to change MTU by releasing and opening the stmmac driver to set the new MTU. Also call the set_filter function to correctly init the port. This removes the EBUSY error returned while the ethernet port is running and permits a correct MTU change if, for example, DSA requests an MTU change for a switch CPU port.
Signed-off-by: Christian Marangi Signed-off-by: Jakub Kicinski --- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 30 +++++++++++++++---- 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 81bbc8a1eb2f7..070b5ef165eba 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -5551,18 +5551,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) { struct stmmac_priv *priv = netdev_priv(dev); int txfifosz = priv->plat->tx_fifo_size; + struct stmmac_dma_conf *dma_conf; const int mtu = new_mtu; + int ret; if (txfifosz == 0) txfifosz = priv->dma_cap.tx_fifo_size; txfifosz /= priv->plat->tx_queues_to_use; - if (netif_running(dev)) { - netdev_err(priv->dev, "must be stopped to change its MTU\n"); - return -EBUSY; - } - if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); return -EINVAL; @@ -5574,8 +5571,29 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) return -EINVAL; - dev->mtu = mtu; + if (netif_running(dev)) { + netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); + /* Try to allocate the new DMA conf with the new mtu */ + dma_conf = stmmac_setup_dma_desc(priv, mtu); + if (IS_ERR(dma_conf)) { + netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", + mtu); + return PTR_ERR(dma_conf); + } + stmmac_release(dev); + + ret = __stmmac_open(dev, dma_conf); + kfree(dma_conf); + if (ret) { + netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); + return ret; + } + + stmmac_set_rx_mode(dev); + } + + dev->mtu = mtu; netdev_update_features(dev); return 0;
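Taken together, patches 4 and 5 turn the MTU path into an allocate-first scheme: stmmac_setup_dma_desc() builds a complete candidate dma_conf for the new MTU, and only if that allocation succeeds does stmmac_change_mtu() call stmmac_release() followed by __stmmac_open() to swap the new configuration in, so an allocation failure leaves the running interface and its existing buffers untouched. Below is a minimal, self-contained userspace C sketch of that error-handling pattern; it only models the control flow of the series, and every name in it (make_conf, change_mtu, struct net_dev) is an illustrative stand-in rather than stmmac code.

/*
 * Model of the "allocate the new configuration before tearing down the
 * old one" pattern used by stmmac_change_mtu() after this series.
 * Illustrative only: none of these types or helpers exist in the driver.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct conf {
	int mtu;
	size_t buf_sz;
};

struct net_dev {
	int running;
	struct conf active;
};

/* Build a complete candidate configuration; may fail with no side effects. */
static struct conf *make_conf(int mtu)
{
	struct conf *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->mtu = mtu;
	c->buf_sz = mtu <= 1500 ? 2048 : 16384;	/* crude stand-in for bfsize */
	return c;
}

static int change_mtu(struct net_dev *dev, int mtu)
{
	struct conf *c;

	if (dev->running) {
		/* Allocate the replacement first ... */
		c = make_conf(mtu);
		if (!c)
			return -ENOMEM;	/* old config still active, no -EBUSY needed */

		/* ... and only then tear down and swap ("release" + "open"). */
		dev->active = *c;
		free(c);
	} else {
		/* Interface down: just record the MTU, buffers are sized at open. */
		dev->active.mtu = mtu;
	}
	return 0;
}

int main(void)
{
	struct net_dev dev = {
		.running = 1,
		.active = { .mtu = 1500, .buf_sz = 2048 },
	};
	int ret;

	ret = change_mtu(&dev, 9000);
	if (ret)
		fprintf(stderr, "change_mtu failed: %d\n", ret);

	printf("mtu=%d buf_sz=%zu\n", dev.active.mtu, dev.active.buf_sz);
	return 0;
}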