amd-xgbe: Add netif_* message support to the driver
Add support for the network interface message level settings for
determining whether to issue some of the driver messages. Make
use of the netif_* interface where appropriate.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Lendacky, Thomas authored and David S. Miller committed May 15, 2015
1 parent 5452b2d commit 34bf65d
Showing 7 changed files with 191 additions and 151 deletions.
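
For context: the interface these hunks adopt is the standard netdev message-level pattern. The driver keeps a msg_enable bitmask, normally seeded from a module parameter via netif_msg_init(), and helpers such as netif_dbg()/netif_info() test a NETIF_MSG_* bit in that mask before printing. A minimal sketch of the wiring, with placeholder names (the parameter and default mask below are illustrative, not taken from this commit):

    #include <linux/module.h>
    #include <linux/netdevice.h>

    /* Illustrative only: parameter name and default mask are placeholders. */
    static int debug = -1;          /* -1 selects the default mask below */
    module_param(debug, int, 0644);
    MODULE_PARM_DESC(debug, "netif message level bitmap (-1 for defaults)");

    #define DEFAULT_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)

    /* Called once at probe: resolve the parameter into the per-device mask
     * that netif_dbg()/netif_info() consult on every call. */
    static void init_msg_level(struct xgbe_prv_data *pdata)
    {
            pdata->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
    }

The same mask is what ethtool reports as the current message level and what "ethtool -s <iface> msglvl ..." writes, so message classes (drv, tx_queued, rx_status, ...) can be toggled on a running system, whereas the old DBGPR() macro was compiled in or out with a build-time define.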
17 changes: 11 additions & 6 deletions drivers/net/ethernet/amd/xgbe/xgbe-dcb.c

@@ -150,9 +150,12 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
 	tc_ets = 0;
 	tc_ets_weight = 0;
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-		DBGPR("  TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
-		      ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]);
-		DBGPR("  PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]);
+		netif_dbg(pdata, drv, netdev,
+			  "TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
+			  ets->tc_tx_bw[i], ets->tc_rx_bw[i],
+			  ets->tc_tsa[i]);
+		netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i,
+			  ets->prio_tc[i]);
 
 		if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
 		    (i >= pdata->hw_feat.tc_cnt))
@@ -214,8 +217,9 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 
-	DBGPR("  cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n",
-	      pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
+	netif_dbg(pdata, drv, netdev,
+		  "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
+		  pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
 
 	if (!pdata->pfc) {
 		pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
@@ -238,9 +242,10 @@ static u8 xgbe_dcb_getdcbx(struct net_device *netdev)
 
 static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
 {
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	u8 support = xgbe_dcb_getdcbx(netdev);
 
-	DBGPR("  DCBX=%#hhx\n", dcbx);
+	netif_dbg(pdata, drv, netdev, "DCBX=%#hhx\n", dcbx);
 
 	if (dcbx & ~support)
 		return 1;
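
Note why xgbe_dcb_setdcbx() above gains a pdata local: unlike the compile-time DBGPR(), netif_dbg() takes the driver-private structure as its first argument so it can test the class bit at run time. The gating shape, simplified from <linux/netdevice.h> (the in-tree macro routes through netdev_dbg() and dynamic debug, but the test is the same):

    /* One predicate per message class: a bit test against msg_enable. */
    #define netif_msg_drv(p)        ((p)->msg_enable & NETIF_MSG_DRV)

    /* The print fires only when the class bit is set. */
    #define netif_dbg(priv, type, netdev, format, args...)          \
    do {                                                            \
            if (netif_msg_##type(priv))                             \
                    netdev_dbg(netdev, format, ##args);             \
    } while (0)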
33 changes: 21 additions & 12 deletions drivers/net/ethernet/amd/xgbe/xgbe-desc.c

@@ -208,8 +208,9 @@ static int xgbe_init_ring(struct xgbe_prv_data *pdata,
 	if (!ring->rdata)
 		return -ENOMEM;
 
-	DBGPR("  rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
-	      ring->rdesc, ring->rdesc_dma, ring->rdata);
+	netif_dbg(pdata, drv, pdata->netdev,
+		  "rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
+		  ring->rdesc, &ring->rdesc_dma, ring->rdata);
 
 	DBGPR("<--xgbe_init_ring\n");
 
@@ -226,7 +227,9 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
 
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
-		DBGPR("  %s - tx_ring:\n", channel->name);
+		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
+			  channel->name);
+
 		ret = xgbe_init_ring(pdata, channel->tx_ring,
 				     pdata->tx_desc_count);
 		if (ret) {
@@ -235,12 +238,14 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
 			goto err_ring;
 		}
 
-		DBGPR("  %s - rx_ring:\n", channel->name);
+		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
+			  channel->name);
+
 		ret = xgbe_init_ring(pdata, channel->rx_ring,
 				     pdata->rx_desc_count);
 		if (ret) {
 			netdev_alert(pdata->netdev,
-				     "error initializing Tx ring\n");
+				     "error initializing Rx ring\n");
 			goto err_ring;
 		}
 	}
@@ -518,8 +523,6 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 	rdata = XGBE_GET_DESC_DATA(ring, cur_index);
 
 	if (tso) {
-		DBGPR("  TSO packet\n");
-
 		/* Map the TSO header */
 		skb_dma = dma_map_single(pdata->dev, skb->data,
 					 packet->header_len, DMA_TO_DEVICE);
@@ -529,6 +532,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 		}
 		rdata->skb_dma = skb_dma;
 		rdata->skb_dma_len = packet->header_len;
+		netif_dbg(pdata, tx_queued, pdata->netdev,
+			  "skb header: index=%u, dma=%pad, len=%u\n",
+			  cur_index, &skb_dma, packet->header_len);
 
 		offset = packet->header_len;
 
@@ -550,8 +556,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 		}
 		rdata->skb_dma = skb_dma;
 		rdata->skb_dma_len = len;
-		DBGPR("  skb data: index=%u, dma=0x%llx, len=%u\n",
-		      cur_index, skb_dma, len);
+		netif_dbg(pdata, tx_queued, pdata->netdev,
+			  "skb data: index=%u, dma=%pad, len=%u\n",
+			  cur_index, &skb_dma, len);
 
 		datalen -= len;
 		offset += len;
@@ -563,7 +570,8 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		DBGPR("  mapping frag %u\n", i);
+		netif_dbg(pdata, tx_queued, pdata->netdev,
+			  "mapping frag %u\n", i);
 
 		frag = &skb_shinfo(skb)->frags[i];
 		offset = 0;
@@ -582,8 +590,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 		rdata->skb_dma = skb_dma;
 		rdata->skb_dma_len = len;
 		rdata->mapped_as_page = 1;
-		DBGPR("  skb data: index=%u, dma=0x%llx, len=%u\n",
-		      cur_index, skb_dma, len);
+		netif_dbg(pdata, tx_queued, pdata->netdev,
+			  "skb frag: index=%u, dma=%pad, len=%u\n",
+			  cur_index, &skb_dma, len);
 
 		datalen -= len;
 		offset += len;
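
A fix that rides along in this file: dma_addr_t is 32 or 64 bits depending on kernel configuration, so the old 0x%llx specifier only matched some builds. The %pad printk extension prints a dma_addr_t at its native width but expects a pointer to it, which is why the arguments become &skb_dma and &ring->rdesc_dma. In isolation:

    dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

    pr_debug("dma=0x%llx\n", dma);  /* wrong: width mismatch when dma_addr_t is 32-bit */
    pr_debug("dma=%pad\n", &dma);   /* right: %pad takes the address, prints either width */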
70 changes: 41 additions & 29 deletions drivers/net/ethernet/amd/xgbe/xgbe-dev.c

@@ -710,7 +710,8 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
 	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
 		return 0;
 
-	DBGPR("  %s promiscuous mode\n", enable ? "entering" : "leaving");
+	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
+		  enable ? "entering" : "leaving");
 	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
 
 	return 0;
@@ -724,7 +725,8 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
 	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
 		return 0;
 
-	DBGPR("  %s allmulti mode\n", enable ? "entering" : "leaving");
+	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
+		  enable ? "entering" : "leaving");
 	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
 
 	return 0;
@@ -749,8 +751,9 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
 		mac_addr[0] = ha->addr[4];
 		mac_addr[1] = ha->addr[5];
 
-		DBGPR("  adding mac address %pM at 0x%04x\n", ha->addr,
-		      *mac_reg);
+		netif_dbg(pdata, drv, pdata->netdev,
+			  "adding mac address %pM at %#x\n",
+			  ha->addr, *mac_reg);
 
 		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
 	}
@@ -1322,15 +1325,17 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
 	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
 		switch (ets->tc_tsa[i]) {
 		case IEEE_8021QAZ_TSA_STRICT:
-			DBGPR("  TC%u using SP\n", i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TC%u using SP\n", i);
 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
 					       MTL_TSA_SP);
 			break;
 		case IEEE_8021QAZ_TSA_ETS:
 			weight = total_weight * ets->tc_tx_bw[i] / 100;
 			weight = clamp(weight, min_weight, total_weight);
 
-			DBGPR("  TC%u using DWRR (weight %u)\n", i, weight);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TC%u using DWRR (weight %u)\n", i, weight);
 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
 					       MTL_TSA_ETS);
 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
@@ -1359,7 +1364,8 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
 		}
 		mask &= 0xff;
 
-		DBGPR("  TC%u PFC mask=%#x\n", tc, mask);
+		netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
+			  tc, mask);
 		reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
 		reg_val = XGMAC_IOREAD(pdata, reg);
 
@@ -1457,8 +1463,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 	/* Create a context descriptor if this is a TSO packet */
 	if (tso_context || vlan_context) {
 		if (tso_context) {
-			DBGPR("  TSO context descriptor, mss=%u\n",
-			      packet->mss);
+			netif_dbg(pdata, tx_queued, pdata->netdev,
+				  "TSO context descriptor, mss=%u\n",
+				  packet->mss);
 
 			/* Set the MSS size */
 			XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
@@ -1476,8 +1483,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 		}
 
 		if (vlan_context) {
-			DBGPR("  VLAN context descriptor, ctag=%u\n",
-			      packet->vlan_ctag);
+			netif_dbg(pdata, tx_queued, pdata->netdev,
+				  "VLAN context descriptor, ctag=%u\n",
+				  packet->vlan_ctag);
 
 			/* Mark it as a CONTEXT descriptor */
 			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
@@ -1596,9 +1604,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 	rdesc = rdata->rdesc;
 	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
 
-#ifdef XGMAC_ENABLE_TX_DESC_DUMP
-	xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
-#endif
+	if (netif_msg_tx_queued(pdata))
+		xgbe_dump_tx_desc(pdata, ring, start_index,
+				  packet->rdesc_count, 1);
 
 	/* Make sure ownership is written to the descriptor */
 	dma_wmb();
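
This hunk and the Rx hunk below replace build-time dump switches with run-time checks: the dump helpers are now always compiled, take pdata, and execute only when the matching message bit is set. The gates are plain bit tests (simplified from <linux/netdevice.h>):

    #define netif_msg_tx_queued(p)  ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
    #define netif_msg_rx_status(p)  ((p)->msg_enable & NETIF_MSG_RX_STATUS)

so descriptor dumps can be enabled on a live system (for example, "ethtool -s eth0 msglvl tx_queued on" with an ethtool recent enough to accept named flags) rather than by rebuilding with XGMAC_ENABLE_TX_DESC_DUMP or XGMAC_ENABLE_RX_DESC_DUMP defined.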
@@ -1640,9 +1648,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 	/* Make sure descriptor fields are read after reading the OWN bit */
 	dma_rmb();
 
-#ifdef XGMAC_ENABLE_RX_DESC_DUMP
-	xgbe_dump_rx_desc(ring, rdesc, ring->cur);
-#endif
+	if (netif_msg_rx_status(pdata))
+		xgbe_dump_rx_desc(pdata, ring, ring->cur);
 
 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
 		/* Timestamp Context Descriptor */
@@ -1713,7 +1720,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 	/* Check for errors (only valid in last descriptor) */
 	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
 	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
-	DBGPR("  err=%u, etlt=%#x\n", err, etlt);
+	netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
 
 	if (!err || !etlt) {
 		/* No error if err is 0 or etlt is 0 */
@@ -1724,7 +1731,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
 							      RX_NORMAL_DESC0,
 							      OVT);
-			DBGPR("  vlan-ctag=0x%04x\n", packet->vlan_ctag);
+			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
+				  packet->vlan_ctag);
 		}
 	} else {
 		if ((etlt == 0x05) || (etlt == 0x06))
@@ -2032,9 +2040,9 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
 	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
 
-	netdev_notice(pdata->netdev,
-		      "%d Tx hardware queues, %d byte fifo per queue\n",
-		      pdata->tx_q_count, ((fifo_size + 1) * 256));
+	netif_info(pdata, drv, pdata->netdev,
+		   "%d Tx hardware queues, %d byte fifo per queue\n",
+		   pdata->tx_q_count, ((fifo_size + 1) * 256));
 }
 
 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
@@ -2048,9 +2056,9 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
 	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
 
-	netdev_notice(pdata->netdev,
-		      "%d Rx hardware queues, %d byte fifo per queue\n",
-		      pdata->rx_q_count, ((fifo_size + 1) * 256));
+	netif_info(pdata, drv, pdata->netdev,
+		   "%d Rx hardware queues, %d byte fifo per queue\n",
+		   pdata->rx_q_count, ((fifo_size + 1) * 256));
 }
 
 static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
@@ -2069,14 +2077,16 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
 
 	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
 		for (j = 0; j < qptc; j++) {
-			DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TXq%u mapped to TC%u\n", queue, i);
 			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
 					       Q2TCMAP, i);
 			pdata->q2tc_map[queue++] = i;
 		}
 
 		if (i < qptc_extra) {
-			DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TXq%u mapped to TC%u\n", queue, i);
 			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
 					       Q2TCMAP, i);
 			pdata->q2tc_map[queue++] = i;
@@ -2094,13 +2104,15 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
 	for (i = 0, prio = 0; i < prio_queues;) {
 		mask = 0;
 		for (j = 0; j < ppq; j++) {
-			DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "PRIO%u mapped to RXq%u\n", prio, i);
 			mask |= (1 << prio);
 			pdata->prio2q_map[prio++] = i;
 		}
 
 		if (i < ppq_extra) {
-			DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "PRIO%u mapped to RXq%u\n", prio, i);
 			mask |= (1 << prio);
 			pdata->prio2q_map[prio++] = i;
 		}
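The four remaining changed files are not rendered above; they typically carry the plumbing that stores msg_enable in the private data and exposes it through ethtool. A sketch of the conventional accessors (illustrative, not the commit's actual ethtool hunks):

    static u32 xgbe_get_msglevel(struct net_device *netdev)
    {
            struct xgbe_prv_data *pdata = netdev_priv(netdev);

            return pdata->msg_enable;
    }

    static void xgbe_set_msglevel(struct net_device *netdev, u32 msglevel)
    {
            struct xgbe_prv_data *pdata = netdev_priv(netdev);

            pdata->msg_enable = msglevel;
    }

    /* wired into struct ethtool_ops as .get_msglevel / .set_msglevel */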
