net: ethernet: ti: am65-cpsw: streamline RX queue creation and cleanup
Introduce am65_cpsw_create_rxqs() and am65_cpsw_destroy_rxqs()
and use them.

Signed-off-by: Roger Quadros <rogerq@kernel.org>
Link: https://patch.msgid.link/20250117-am65-cpsw-streamline-v2-2-91a29c97e569@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Roger Quadros authored and Jakub Kicinski committed Jan 20, 2025
1 parent 681eb2b commit 66c1ae6
Showing 1 changed file with 119 additions and 124 deletions.
243 changes: 119 additions & 124 deletions drivers/net/ethernet/ti/am65-cpsw-nuss.c
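Note on the shape of the change: the per-flow RX setup and teardown is folded into am65_cpsw_create_rxq()/am65_cpsw_destroy_rxq(), and am65_cpsw_create_rxqs()/am65_cpsw_destroy_rxqs() loop over the flows. Below is a minimal standalone sketch of the create/unwind pattern used by the new am65_cpsw_create_rxqs() error path; it is illustration only, not driver code, and the names in it are made up.

#include <stdio.h>

#define NUM_FLOWS 8

static int create_rxq(int id)
{
	/* pretend the 6th flow fails to allocate */
	if (id == 5)
		return -1;
	printf("created rxq %d\n", id);
	return 0;
}

static void destroy_rxq(int id)
{
	printf("destroyed rxq %d\n", id);
}

static int create_rxqs(void)
{
	int id, ret;

	for (id = 0; id < NUM_FLOWS; id++) {
		ret = create_rxq(id);
		if (ret)
			goto err;
	}

	return 0;

err:
	/* unwind only what was set up: ids [0, id), newest first */
	for (--id; id >= 0; id--)
		destroy_rxq(id);

	return ret;
}

int main(void)
{
	return create_rxqs() ? 1 : 0;
}

On failure, only the flows that were already created are torn down, newest first, which is the same for (--id; id >= 0; id--) unwind that appears in the diff.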
@@ -498,35 +498,61 @@ static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
 static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
 static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
 static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
+				      struct page *page,
+				      bool allow_direct);
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
 
-static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
+static void am65_cpsw_destroy_rxq(struct am65_cpsw_common *common, int id)
 {
 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 	struct am65_cpsw_rx_flow *flow;
 	struct xdp_rxq_info *rxq;
-	int id, port;
+	int port;
 
-	for (id = 0; id < common->rx_ch_num_flows; id++) {
-		flow = &rx_chn->flows[id];
+	flow = &rx_chn->flows[id];
+	napi_disable(&flow->napi_rx);
+	hrtimer_cancel(&flow->rx_hrtimer);
+	k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, id, rx_chn,
+				  am65_cpsw_nuss_rx_cleanup, !!id);
 
-		for (port = 0; port < common->port_num; port++) {
-			if (!common->ports[port].ndev)
-				continue;
+	for (port = 0; port < common->port_num; port++) {
+		if (!common->ports[port].ndev)
+			continue;
 
-			rxq = &common->ports[port].xdp_rxq[id];
+		rxq = &common->ports[port].xdp_rxq[id];
 
-			if (xdp_rxq_info_is_reg(rxq))
-				xdp_rxq_info_unreg(rxq);
-		}
+		if (xdp_rxq_info_is_reg(rxq))
+			xdp_rxq_info_unreg(rxq);
+	}
 
-		if (flow->page_pool) {
-			page_pool_destroy(flow->page_pool);
-			flow->page_pool = NULL;
-		}
+	if (flow->page_pool) {
+		page_pool_destroy(flow->page_pool);
+		flow->page_pool = NULL;
 	}
 }
 
-static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
+static void am65_cpsw_destroy_rxqs(struct am65_cpsw_common *common)
 {
 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+	int id;
+
+	reinit_completion(&common->tdown_complete);
+	k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
+
+	if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
+		id = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
+		if (!id)
+			dev_err(common->dev, "rx teardown timeout\n");
+	}
+
+	for (id = common->rx_ch_num_flows - 1; id >= 0; id--)
+		am65_cpsw_destroy_rxq(common, id);
+
+	k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+}
+
+static int am65_cpsw_create_rxq(struct am65_cpsw_common *common, int id)
+{
+	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 	struct page_pool_params pp_params = {
@@ -541,45 +567,92 @@ static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
 	struct am65_cpsw_rx_flow *flow;
 	struct xdp_rxq_info *rxq;
 	struct page_pool *pool;
-	int id, port, ret;
+	struct page *page;
+	int port, ret, i;
 
-	for (id = 0; id < common->rx_ch_num_flows; id++) {
-		flow = &rx_chn->flows[id];
-		pp_params.napi = &flow->napi_rx;
-		pool = page_pool_create(&pp_params);
-		if (IS_ERR(pool)) {
-			ret = PTR_ERR(pool);
+	flow = &rx_chn->flows[id];
+	pp_params.napi = &flow->napi_rx;
+	pool = page_pool_create(&pp_params);
+	if (IS_ERR(pool)) {
+		ret = PTR_ERR(pool);
+		return ret;
+	}
+
+	flow->page_pool = pool;
+
+	/* using same page pool is allowed as no running rx handlers
+	 * simultaneously for both ndevs
+	 */
+	for (port = 0; port < common->port_num; port++) {
+		if (!common->ports[port].ndev)
+			/* FIXME should we BUG here? */
+			continue;
+
+		rxq = &common->ports[port].xdp_rxq[id];
+		ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
+				       id, flow->napi_rx.napi_id);
+		if (ret)
 			goto err;
+
+		ret = xdp_rxq_info_reg_mem_model(rxq,
+						 MEM_TYPE_PAGE_POOL,
+						 pool);
+		if (ret)
+			goto err;
+	}
+
+	for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
+		page = page_pool_dev_alloc_pages(flow->page_pool);
+		if (!page) {
+			dev_err(common->dev, "cannot allocate page in flow %d\n",
+				id);
+			ret = -ENOMEM;
+			goto err;
 		}
 
-		flow->page_pool = pool;
+		ret = am65_cpsw_nuss_rx_push(common, page, id);
+		if (ret < 0) {
+			dev_err(common->dev,
+				"cannot submit page to rx channel flow %d, error %d\n",
+				id, ret);
+			am65_cpsw_put_page(flow, page, false);
+			goto err;
+		}
+	}
 
-		/* using same page pool is allowed as no running rx handlers
-		 * simultaneously for both ndevs
-		 */
-		for (port = 0; port < common->port_num; port++) {
-			if (!common->ports[port].ndev)
-				continue;
+	napi_enable(&flow->napi_rx);
+	return 0;
 
-			rxq = &common->ports[port].xdp_rxq[id];
+err:
+	am65_cpsw_destroy_rxq(common, id);
+	return ret;
+}
 
-			ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
-					       id, flow->napi_rx.napi_id);
-			if (ret)
-				goto err;
+static int am65_cpsw_create_rxqs(struct am65_cpsw_common *common)
+{
+	int id, ret;
 
-			ret = xdp_rxq_info_reg_mem_model(rxq,
-							 MEM_TYPE_PAGE_POOL,
-							 pool);
-			if (ret)
-				goto err;
+	for (id = 0; id < common->rx_ch_num_flows; id++) {
+		ret = am65_cpsw_create_rxq(common, id);
+		if (ret) {
+			dev_err(common->dev, "couldn't create rxq %d: %d\n",
+				id, ret);
+			goto err;
		}
 	}
 
+	ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+	if (ret) {
+		dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
+		goto err;
+	}
+
 	return 0;
 
 err:
-	am65_cpsw_destroy_xdp_rxqs(common);
+	for (--id; id >= 0; id--)
+		am65_cpsw_destroy_rxq(common, id);
+
 	return ret;
 }
 
@@ -643,7 +716,6 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
 	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
 	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
 	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
 	am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
 }
 
@@ -718,12 +790,9 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
 static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
 {
 	struct am65_cpsw_host *host_p = am65_common_get_host(common);
-	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
-	int port_idx, i, ret, tx, flow_idx;
-	struct am65_cpsw_rx_flow *flow;
+	int port_idx, ret, tx;
 	u32 val, port_mask;
-	struct page *page;
 
 	if (common->usage_count)
 		return 0;
@@ -783,47 +852,9 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
 
 	am65_cpsw_qos_tx_p0_rate_init(common);
 
-	ret = am65_cpsw_create_xdp_rxqs(common);
-	if (ret) {
-		dev_err(common->dev, "Failed to create XDP rx queues\n");
+	ret = am65_cpsw_create_rxqs(common);
+	if (ret)
 		return ret;
-	}
 
-	for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
-		flow = &rx_chn->flows[flow_idx];
-		for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
-			page = page_pool_dev_alloc_pages(flow->page_pool);
-			if (!page) {
-				dev_err(common->dev, "cannot allocate page in flow %d\n",
-					flow_idx);
-				ret = -ENOMEM;
-				goto fail_rx;
-			}
-
-			ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
-			if (ret < 0) {
-				dev_err(common->dev,
-					"cannot submit page to rx channel flow %d, error %d\n",
-					flow_idx, ret);
-				am65_cpsw_put_page(flow, page, false);
-				goto fail_rx;
-			}
-		}
-	}
-
-	ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
-	if (ret) {
-		dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
-		goto fail_rx;
-	}
-
-	for (i = 0; i < common->rx_ch_num_flows ; i++) {
-		napi_enable(&rx_chn->flows[i].napi_rx);
-		if (rx_chn->flows[i].irq_disabled) {
-			rx_chn->flows[i].irq_disabled = false;
-			enable_irq(rx_chn->flows[i].irq);
-		}
-	}
-
 	for (tx = 0; tx < common->tx_ch_num; tx++) {
 		ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
@@ -846,30 +877,13 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
 		tx--;
 	}
 
-	for (flow_idx = 0; i < common->rx_ch_num_flows; flow_idx++) {
-		flow = &rx_chn->flows[flow_idx];
-		if (!flow->irq_disabled) {
-			disable_irq(flow->irq);
-			flow->irq_disabled = true;
-		}
-		napi_disable(&flow->napi_rx);
-	}
-
-	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
-
-fail_rx:
-	for (i = 0; i < common->rx_ch_num_flows; i++)
-		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
-					  am65_cpsw_nuss_rx_cleanup, !!i);
-
-	am65_cpsw_destroy_xdp_rxqs(common);
+	am65_cpsw_destroy_rxqs(common);
 
 	return ret;
 }
 
 static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
 {
-	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
 	int i;
 
@@ -903,31 +917,12 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
 		k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
 	}
 
-	reinit_completion(&common->tdown_complete);
-	k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
-
-	if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
-		i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
-		if (!i)
-			dev_err(common->dev, "rx teardown timeout\n");
-	}
-
-	for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
-		napi_disable(&rx_chn->flows[i].napi_rx);
-		hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
-		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
-					  am65_cpsw_nuss_rx_cleanup, !!i);
-	}
-
-	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
-
+	am65_cpsw_destroy_rxqs(common);
 	cpsw_ale_stop(common->ale);
 
 	writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
 	writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
 
-	am65_cpsw_destroy_xdp_rxqs(common);
-
 	dev_dbg(common->dev, "cpsw_nuss stopped\n");
 	return 0;
 }
