Merge branch 'thunderx-ethtool'

Sunil Goutham says:

====================
thunderx: More ethtool support and BGX configuration changes

These patches add support for setting queue sizes from ethtool and change
the way serdes lane configuration is done by the BGX driver on 81/83xx
platforms.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on Jan 25, 2017 · 2 parents 3979ad7 + fff37fd · commit 761095c
Showing 4 changed files with 83 additions and 86 deletions.
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c (37 additions, 2 deletions)

@@ -471,12 +471,46 @@ static void nicvf_get_ringparam(struct net_device *netdev,
 	struct nicvf *nic = netdev_priv(netdev);
 	struct queue_set *qs = nic->qs;
 
-	ring->rx_max_pending = MAX_RCV_BUF_COUNT;
-	ring->rx_pending = qs->rbdr_len;
+	ring->rx_max_pending = MAX_CMP_QUEUE_LEN;
+	ring->rx_pending = qs->cq_len;
 	ring->tx_max_pending = MAX_SND_QUEUE_LEN;
 	ring->tx_pending = qs->sq_len;
 }
 
+static int nicvf_set_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *ring)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+	struct queue_set *qs = nic->qs;
+	u32 rx_count, tx_count;
+
+	/* Due to HW errata this is not supported on T88 pass 1.x silicon */
+	if (pass1_silicon(nic->pdev))
+		return -EINVAL;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	tx_count = clamp_t(u32, ring->tx_pending,
+			   MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN);
+	rx_count = clamp_t(u32, ring->rx_pending,
+			   MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN);
+
+	if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len))
+		return 0;
+
+	/* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */
+	qs->sq_len = rounddown_pow_of_two(tx_count);
+	qs->cq_len = rounddown_pow_of_two(rx_count);
+
+	if (netif_running(netdev)) {
+		nicvf_stop(netdev);
+		nicvf_open(netdev);
+	}
+
+	return 0;
+}
+
 static int nicvf_get_rss_hash_opts(struct nicvf *nic,
 				   struct ethtool_rxnfc *info)
 {
@@ -787,6 +821,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
 	.get_regs = nicvf_get_regs,
 	.get_coalesce = nicvf_get_coalesce,
 	.get_ringparam = nicvf_get_ringparam,
+	.set_ringparam = nicvf_set_ringparam,
 	.get_rxnfc = nicvf_get_rxnfc,
 	.set_rxnfc = nicvf_set_rxnfc,
 	.get_rxfh_key_size = nicvf_get_rxfh_key_size,
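
For context, a userspace request such as "ethtool -G <dev> rx N tx N" is what reaches the
new nicvf_set_ringparam() handler above. Below is a minimal standalone sketch (not the
driver code itself) of how the effective ring length is derived, assuming only the usual
behavior of the kernel's clamp_t() and rounddown_pow_of_two() helpers: clamp the request
to the supported range, then round down to a power of two.

#include <stdio.h>

#define MIN_QUEUE_LEN 1024U	/* 1K,  queue size index 0 */
#define MAX_QUEUE_LEN 65536U	/* 64K, queue size index 6 */

static unsigned int effective_ring_len(unsigned int requested)
{
	unsigned int len = requested;

	/* clamp_t(u32, ...) equivalent */
	if (len < MIN_QUEUE_LEN)
		len = MIN_QUEUE_LEN;
	if (len > MAX_QUEUE_LEN)
		len = MAX_QUEUE_LEN;

	/* rounddown_pow_of_two(): drop low set bits until only the MSB remains */
	while (len & (len - 1))
		len &= len - 1;

	return len;
}

int main(void)
{
	printf("%u -> %u\n", 3000U, effective_ring_len(3000U));	/* 2048 */
	printf("%u -> %u\n", 100000U, effective_ring_len(100000U));	/* 65536 */
	return 0;
}
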
drivers/net/ethernet/cavium/thunder/nicvf_queues.c (16 additions, 3 deletions)

@@ -603,7 +603,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
 	cq_cfg.ena = 1;
 	cq_cfg.reset = 0;
 	cq_cfg.caching = 0;
-	cq_cfg.qsize = CMP_QSIZE;
+	cq_cfg.qsize = ilog2(qs->cq_len >> 10);
 	cq_cfg.avg_con = 0;
 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
 
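The qsize field written above encodes the ring size as log2(entries / 1024), so the
supported 1K-64K lengths map to 0..6. A small host-side sketch of the mapping, using a
stand-in for the kernel's ilog2() helper:

#include <stdio.h>

/* stand-in for the kernel's ilog2(): index of the most significant set bit */
static unsigned int ilog2_sketch(unsigned long v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	for (unsigned long len = 1024; len <= 65536; len <<= 1)
		printf("len %6lu -> qsize %u\n", len, ilog2_sketch(len >> 10));
	return 0;
}
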
@@ -652,9 +652,12 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
 	sq_cfg.ena = 1;
 	sq_cfg.reset = 0;
 	sq_cfg.ldwb = 0;
-	sq_cfg.qsize = SND_QSIZE;
+	sq_cfg.qsize = ilog2(qs->sq_len >> 10);
 	sq_cfg.tstmp_bgx_intf = 0;
-	sq_cfg.cq_limit = 0;
+	/* CQ's level at which HW will stop processing SQEs to avoid
+	 * transmitting a pkt with no space in CQ to post CQE_TX.
+	 */
+	sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
 
 	/* Set threshold value for interrupt generation */
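
A worked check of the new cq_limit formula above. Assuming the register field is
expressed in 1/256ths of the CQ size (which is what the formula implies), the driver
holds back roughly CMP_QUEUE_PIPELINE_RSVD (544) CQEs regardless of the configured
CQ length:

#include <stdio.h>

#define CMP_QUEUE_PIPELINE_RSVD 544U

int main(void)
{
	for (unsigned int cq_len = 1024; cq_len <= 65536; cq_len <<= 1) {
		unsigned int cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / cq_len;
		/* CQEs effectively reserved after integer truncation */
		unsigned int rsvd = cq_limit * cq_len / 256;

		printf("cq_len %5u -> cq_limit %3u (~%u CQEs held back)\n",
		       cq_len, cq_limit, rsvd);
	}
	return 0;
}
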
@@ -816,11 +819,21 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
 {
 	bool disable = false;
 	struct queue_set *qs = nic->qs;
+	struct queue_set *pqs = nic->pnicvf->qs;
 	int qidx;
 
 	if (!qs)
 		return 0;
 
+	/* Take primary VF's queue lengths.
+	 * This is needed to take queue lengths set from ethtool
+	 * into consideration.
+	 */
+	if (nic->sqs_mode && pqs) {
+		qs->cq_len = pqs->cq_len;
+		qs->sq_len = pqs->sq_len;
+	}
+
 	if (enable) {
 		if (nicvf_alloc_resources(nic))
 			return -ENOMEM;
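
(Context for the hunk above: when a VF runs as a secondary Qset (nic->sqs_mode),
nic->pnicvf points at the primary VF's state, so ring lengths configured via ethtool
on the primary interface now propagate to the secondary Qsets whenever their queues
are reconfigured.)
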
drivers/net/ethernet/cavium/thunder/nicvf_queues.h (12 additions, 4 deletions)

@@ -59,8 +59,9 @@
 /* Default queue count per QS, its lengths and threshold values */
 #define DEFAULT_RBDR_CNT	1
 
-#define SND_QSIZE		SND_QUEUE_SIZE2
+#define SND_QSIZE		SND_QUEUE_SIZE0
 #define SND_QUEUE_LEN		(1ULL << (SND_QSIZE + 10))
+#define MIN_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE0 + 10))
 #define MAX_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE6 + 10))
 #define SND_QUEUE_THRESH	2ULL
 #define MIN_SQ_DESC_PER_PKT_XMIT	2
@@ -70,11 +71,18 @@
 /* Keep CQ and SQ sizes same, if timestamping
  * is enabled this equation will change.
  */
-#define CMP_QSIZE		CMP_QUEUE_SIZE2
+#define CMP_QSIZE		CMP_QUEUE_SIZE0
 #define CMP_QUEUE_LEN		(1ULL << (CMP_QSIZE + 10))
+#define MIN_CMP_QUEUE_LEN	(1ULL << (CMP_QUEUE_SIZE0 + 10))
+#define MAX_CMP_QUEUE_LEN	(1ULL << (CMP_QUEUE_SIZE6 + 10))
 #define CMP_QUEUE_CQE_THRESH	(NAPI_POLL_WEIGHT / 2)
 #define CMP_QUEUE_TIMER_THRESH	80 /* ~2usec */
 
+/* No. of CQEs that may anyway get used by HW due to pipelining
+ * effects, irrespective of the PASS/DROP levels configured
+ */
+#define CMP_QUEUE_PIPELINE_RSVD	544
+
 #define RBDR_SIZE		RBDR_SIZE0
 #define RCV_BUF_COUNT		(1ULL << (RBDR_SIZE + 13))
 #define MAX_RCV_BUF_COUNT	(1ULL << (RBDR_SIZE6 + 13))
@@ -93,8 +101,8 @@
  * RED accepts pkt if unused CQE < 2560 & >= 2304
  * DROPs pkts if unused CQE < 2304
  */
-#define RQ_PASS_CQ_LVL		160ULL
-#define RQ_DROP_CQ_LVL		144ULL
+#define RQ_PASS_CQ_LVL		192ULL
+#define RQ_DROP_CQ_LVL		184ULL
 
 /* RED and Backpressure levels of RBDR for pkt reception
  * For RBDR, level is a measure of fullness i.e 0x0 means empty
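
Going by the in-source example above (pass/drop levels of 160/144 on a 4K CQ giving
2560/2304 unused-CQE thresholds), one level unit works out to cq_len/256 entries. A
sketch of the thresholds the new 192/184 levels imply at each configurable CQ length:

#include <stdio.h>

#define RQ_PASS_CQ_LVL 192ULL
#define RQ_DROP_CQ_LVL 184ULL

int main(void)
{
	for (unsigned long long cq_len = 1024; cq_len <= 65536; cq_len <<= 1) {
		unsigned long long unit = cq_len / 256;	/* one level step, in CQEs */

		printf("cq_len %6llu: pass if unused CQE >= %llu, drop if < %llu\n",
		       cq_len, RQ_PASS_CQ_LVL * unit, RQ_DROP_CQ_LVL * unit);
	}
	return 0;
}
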
drivers/net/ethernet/cavium/thunder/thunder_bgx.c (18 additions, 77 deletions)

@@ -894,17 +894,15 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
 	struct device *dev = &bgx->pdev->dev;
 	struct lmac *lmac;
 	char str[20];
-	u8 dlm;
 
-	if (lmacid > bgx->max_lmac)
+	if (!bgx->is_dlm && lmacid)
 		return;
 
 	lmac = &bgx->lmac[lmacid];
-	dlm = (lmacid / 2) + (bgx->bgx_id * 2);
 	if (!bgx->is_dlm)
 		sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
 	else
-		sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm);
+		sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid);
 
 	switch (lmac->lmac_type) {
 	case BGX_MODE_SGMII:
@@ -990,7 +988,6 @@ static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
 static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
 {
 	struct lmac *lmac;
-	struct lmac *olmac;
 	u64 cmr_cfg;
 	u8 lmac_type;
 	u8 lane_to_sds;
@@ -1010,62 +1007,26 @@ static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
 		return;
 	}
 
-	/* On 81xx BGX can be split across 2 DLMs
-	 * firmware programs lmac_type of LMAC0 and LMAC2
+	/* For DLMs or SLMs on 80/81/83xx, many lane configurations
+	 * are possible and they vary across boards. Also the kernel has
+	 * no way to identify the board type/info; firmware does, so
+	 * just take the lmac type and serdes lane config as-is.
 	 */
-	if ((idx == 0) || (idx == 2)) {
-		cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
-		lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
-		lane_to_sds = (u8)(cmr_cfg & 0xFF);
-		/* Check if config is not reset value */
-		if ((lmac_type == 0) && (lane_to_sds == 0xE4))
-			lmac->lmac_type = BGX_MODE_INVALID;
-		else
-			lmac->lmac_type = lmac_type;
-		lmac_set_training(bgx, lmac, lmac->lmacid);
-		lmac_set_lane2sds(bgx, lmac);
-
-		olmac = &bgx->lmac[idx + 1];
-		/* Check if other LMAC on the same DLM is already configured by
-		 * firmware, if so use the same config or else set as same, as
-		 * that of LMAC 0/2.
-		 * This check is needed as on 80xx only one lane of each of the
-		 * DLM of BGX0 is used, so have to rely on firmware for
-		 * distingushing 80xx from 81xx.
-		 */
-		cmr_cfg = bgx_reg_read(bgx, idx + 1, BGX_CMRX_CFG);
-		lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
-		lane_to_sds = (u8)(cmr_cfg & 0xFF);
-		if ((lmac_type == 0) && (lane_to_sds == 0xE4)) {
-			olmac->lmac_type = lmac->lmac_type;
-			lmac_set_lane2sds(bgx, olmac);
-		} else {
-			olmac->lmac_type = lmac_type;
-			olmac->lane_to_sds = lane_to_sds;
-		}
-		lmac_set_training(bgx, olmac, olmac->lmacid);
-	}
-}
-
-static bool is_dlm0_in_bgx_mode(struct bgx *bgx)
-{
-	struct lmac *lmac;
-
-	if (!bgx->is_dlm)
-		return true;
-
-	lmac = &bgx->lmac[0];
-	if (lmac->lmac_type == BGX_MODE_INVALID)
-		return false;
-
-	return true;
+	cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
+	lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
+	lane_to_sds = (u8)(cmr_cfg & 0xFF);
+	/* Check if config is reset value */
+	if ((lmac_type == 0) && (lane_to_sds == 0xE4))
+		lmac->lmac_type = BGX_MODE_INVALID;
+	else
+		lmac->lmac_type = lmac_type;
+	lmac->lane_to_sds = lane_to_sds;
+	lmac_set_training(bgx, lmac, lmac->lmacid);
 }
 
 static void bgx_get_qlm_mode(struct bgx *bgx)
 {
 	struct lmac *lmac;
-	struct lmac *lmac01;
-	struct lmac *lmac23;
 	u8 idx;
 
 	/* Init all LMAC's type to invalid */
@@ -1081,29 +1042,9 @@ static void bgx_get_qlm_mode(struct bgx *bgx)
 	if (bgx->lmac_count > bgx->max_lmac)
 		bgx->lmac_count = bgx->max_lmac;
 
-	for (idx = 0; idx < bgx->max_lmac; idx++)
-		bgx_set_lmac_config(bgx, idx);
-
-	if (!bgx->is_dlm || bgx->is_rgx) {
-		bgx_print_qlm_mode(bgx, 0);
-		return;
-	}
-
-	if (bgx->lmac_count) {
-		bgx_print_qlm_mode(bgx, 0);
-		bgx_print_qlm_mode(bgx, 2);
-	}
-
-	/* If DLM0 is not in BGX mode then LMAC0/1 have
-	 * to be configured with serdes lanes of DLM1
-	 */
-	if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2))
-		return;
-
 	for (idx = 0; idx < bgx->lmac_count; idx++) {
-		lmac01 = &bgx->lmac[idx];
-		lmac23 = &bgx->lmac[idx + 2];
-		lmac01->lmac_type = lmac23->lmac_type;
-		lmac01->lane_to_sds = lmac23->lane_to_sds;
+		bgx_set_lmac_config(bgx, idx);
+		bgx_print_qlm_mode(bgx, idx);
 	}
 }

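For reference, the BGX_CMRX_CFG decode used in bgx_set_lmac_config() above: bits 10:8
hold the LMAC type and bits 7:0 the lane_to_sds map (four 2-bit serdes-lane indexes,
going by how lmac_set_lane2sds() builds it elsewhere in this driver). The hardware
reset value 0xE4 (0b11_10_01_00) is the identity mapping, so type 0 plus 0xE4 is
treated as "never programmed by firmware". A standalone sketch of the decode:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cmr_cfg = 0x0E4;	/* example: the reset value */
	unsigned int lmac_type = (cmr_cfg >> 8) & 0x07;
	unsigned int lane_to_sds = cmr_cfg & 0xFF;
	unsigned int i;

	printf("lmac_type %u, lanes:", lmac_type);
	for (i = 0; i < 4; i++)
		printf(" %u->%u", i, (lane_to_sds >> (2 * i)) & 0x3u);
	if (lmac_type == 0 && lane_to_sds == 0xE4)
		printf("  (reset value: not configured)");
	printf("\n");
	return 0;
}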
