Merge branch 'thunderx-xdp'
Sunil Goutham says:

====================
net: thunderx: Adds XDP support

This patch series adds support for XDP to the ThunderX NIC driver,
which is used on the CN88xx, CN81xx and CN83xx platforms.

Patches 1-4 are performance-improvement and cleanup patches,
written with XDP performance bottlenecks in view.
The rest of the patches add the actual XDP support.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed May 2, 2017
2 parents ee0d8d8 + 7732253 commit b0e9227
Showing 6 changed files with 657 additions and 124 deletions.
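
The data path added here follows the standard XDP pattern: before any skb is allocated, the RX path runs the attached BPF program on the raw frame and acts on its verdict. Below is a minimal sketch of that pattern using the in-kernel helpers bpf_prog_run_xdp() and struct xdp_buff; the function name and the buffer handling are simplified illustrative assumptions, not the actual nicvf code.

/* Illustrative sketch only -- not the nicvf implementation.
 * Assumes <linux/filter.h> for struct xdp_buff / bpf_prog_run_xdp()
 * and the driver's nic.h for struct nicvf.
 */
static u32 nicvf_xdp_rx_sketch(struct nicvf *nic, u8 *frame, unsigned int len)
{
	struct bpf_prog *prog = READ_ONCE(nic->xdp_prog);	/* field added in nic.h below */
	struct xdp_buff xdp;

	if (!prog)
		return XDP_PASS;	/* no program attached: normal stack path */

	xdp.data = frame;
	xdp.data_end = frame + len;

	switch (bpf_prog_run_xdp(prog, &xdp)) {
	case XDP_PASS:
		return XDP_PASS;	/* build an skb and hand the frame to the stack */
	case XDP_TX:
		return XDP_TX;		/* transmit the frame back out of the same port */
	case XDP_ABORTED:
	case XDP_DROP:
	default:
		return XDP_DROP;	/* recycle the buffer; the stack never sees the frame */
	}
}

A program is typically attached from userspace with iproute2, e.g. "ip link set dev <iface> xdp obj prog.o", which reaches the driver through its XDP setup callback.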
10 changes: 8 additions & 2 deletions drivers/net/ethernet/cavium/thunder/nic.h
@@ -252,12 +252,14 @@ struct nicvf_drv_stats {
u64 tx_csum_overflow;

/* driver debug stats */
-u64 rcv_buffer_alloc_failures;
u64 tx_tso;
u64 tx_timeout;
u64 txq_stop;
u64 txq_wake;

+u64 rcv_buffer_alloc_failures;
+u64 page_alloc;
+
struct u64_stats_sync syncp;
};

@@ -266,9 +268,9 @@ struct nicvf {
struct net_device *netdev;
struct pci_dev *pdev;
void __iomem *reg_base;
+struct bpf_prog *xdp_prog;
#define MAX_QUEUES_PER_QSET 8
struct queue_set *qs;
-struct nicvf_cq_poll *napi[8];
void *iommu_domain;
u8 vf_id;
u8 sqs_id;
@@ -294,6 +296,7 @@ struct nicvf {
/* Queue count */
u8 rx_queues;
u8 tx_queues;
+u8 xdp_tx_queues;
u8 max_queues;

u8 node;
@@ -318,6 +321,9 @@ struct nicvf {
struct nicvf_drv_stats __percpu *drv_stats;
struct bgx_stats bgx_stats;

+/* Napi */
+struct nicvf_cq_poll *napi[8];
+
/* MSI-X */
u8 num_vec;
char irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
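
The new xdp_prog pointer is what the attach/detach path swaps in and out, and xdp_tx_queues records how many send queues are reserved for XDP_TX while a program is loaded (one per RX queue, as the nicvf_set_channels() change below makes explicit). A minimal sketch of that bookkeeping, using a hypothetical helper name and omitting the queue and buffer reconfiguration the real series performs:

/* Sketch only -- hypothetical helper, not the driver's setup path.
 * Assumes <linux/bpf.h>/<linux/filter.h> for struct bpf_prog and
 * bpf_prog_put().
 */
static void nicvf_xdp_swap_sketch(struct nicvf *nic, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog;

	/* Publish the new program (or NULL to detach)... */
	old_prog = xchg(&nic->xdp_prog, prog);
	/* ...and release the reference held on the previous one. */
	if (old_prog)
		bpf_prog_put(old_prog);

	/* With a program loaded, each RX queue gets a dedicated XDP_TX
	 * send queue; with none loaded, none are reserved.
	 */
	nic->xdp_tx_queues = prog ? nic->rx_queues : 0;
}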
29 changes: 22 additions & 7 deletions drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -100,11 +100,12 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
NICVF_DRV_STAT(tx_csum_overlap),
NICVF_DRV_STAT(tx_csum_overflow),

-NICVF_DRV_STAT(rcv_buffer_alloc_failures),
NICVF_DRV_STAT(tx_tso),
NICVF_DRV_STAT(tx_timeout),
NICVF_DRV_STAT(txq_stop),
NICVF_DRV_STAT(txq_wake),
+NICVF_DRV_STAT(rcv_buffer_alloc_failures),
+NICVF_DRV_STAT(page_alloc),
};

static const struct nicvf_stat nicvf_queue_stats[] = {
@@ -720,7 +721,7 @@ static int nicvf_set_channels(struct net_device *dev,
struct nicvf *nic = netdev_priv(dev);
int err = 0;
bool if_up = netif_running(dev);
-int cqcount;
+u8 cqcount, txq_count;

if (!channel->rx_count || !channel->tx_count)
return -EINVAL;
@@ -729,10 +730,26 @@ static int nicvf_set_channels(struct net_device *dev,
if (channel->tx_count > nic->max_queues)
return -EINVAL;

+if (nic->xdp_prog &&
+((channel->tx_count + channel->rx_count) > nic->max_queues)) {
+netdev_err(nic->netdev,
+"XDP mode, RXQs + TXQs > Max %d\n",
+nic->max_queues);
+return -EINVAL;
+}
+
if (if_up)
nicvf_stop(dev);

-cqcount = max(channel->rx_count, channel->tx_count);
+nic->rx_queues = channel->rx_count;
+nic->tx_queues = channel->tx_count;
+if (!nic->xdp_prog)
+nic->xdp_tx_queues = 0;
+else
+nic->xdp_tx_queues = channel->rx_count;
+
+txq_count = nic->xdp_tx_queues + nic->tx_queues;
+cqcount = max(nic->rx_queues, txq_count);

if (cqcount > MAX_CMP_QUEUES_PER_QS) {
nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
@@ -741,12 +758,10 @@ static int nicvf_set_channels(struct net_device *dev,
nic->sqs_count = 0;
}

-nic->qs->rq_cnt = min_t(u32, channel->rx_count, MAX_RCV_QUEUES_PER_QS);
-nic->qs->sq_cnt = min_t(u32, channel->tx_count, MAX_SND_QUEUES_PER_QS);
+nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
+nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);

-nic->rx_queues = channel->rx_count;
-nic->tx_queues = channel->tx_count;
err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
if (err)
return err;
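
To make the new queue accounting concrete: with a program loaded, every RX queue needs a dedicated XDP_TX send queue, so the send-queue total becomes tx + rx and the completion-queue count becomes max(rx, tx + rx) instead of max(rx, tx). The standalone program below (plain userspace C; the per-QS and device limits are assumed example values) walks through a request of 4 RX and 2 TX queues with XDP enabled:

/* Standalone illustration of the queue accounting in
 * nicvf_set_channels() after this series; not driver code.
 */
#include <stdio.h>

#define MAX_RCV_QUEUES_PER_QS 8	/* assumed example value */
#define MAX_SND_QUEUES_PER_QS 8	/* assumed example value */

static unsigned int umax(unsigned int a, unsigned int b) { return a > b ? a : b; }
static unsigned int umin(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
	unsigned int max_queues = 8;			/* device limit (assumed) */
	unsigned int rx_count = 4, tx_count = 2;	/* ethtool -L request */
	int xdp_loaded = 1;

	/* New up-front check: in XDP mode RX and TX queues cannot share,
	 * so their sum must fit within the device limit.
	 */
	if (xdp_loaded && rx_count + tx_count > max_queues) {
		printf("rejected: RXQs + TXQs > Max %u\n", max_queues);
		return 1;
	}

	/* One dedicated XDP_TX queue per RX queue while a program is loaded. */
	unsigned int xdp_tx_queues = xdp_loaded ? rx_count : 0;	/* 4 */
	unsigned int txq_count = xdp_tx_queues + tx_count;		/* 4 + 2 = 6 */
	unsigned int cqcount = umax(rx_count, txq_count);		/* max(4, 6) = 6 */

	unsigned int rq_cnt = umin(rx_count, MAX_RCV_QUEUES_PER_QS);	/* 4 */
	unsigned int sq_cnt = umin(txq_count, MAX_SND_QUEUES_PER_QS);	/* 6 */
	unsigned int cq_cnt = umax(rq_cnt, sq_cnt);			/* 6 */

	printf("xdp_tx=%u txq=%u cqcount=%u -> rq_cnt=%u sq_cnt=%u cq_cnt=%u\n",
	       xdp_tx_queues, txq_count, cqcount, rq_cnt, sq_cnt, cq_cnt);
	return 0;
}

Since the resulting 6 completion queues still fit in one queue set, no secondary QSets (sqs_count) are needed for this request.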
[Diffs for the remaining four changed files are not shown here.]
