Merge branch 'thunderx-fixes'
Sunil Goutham says:

====================
net: thunderx: Fixes for TSO offload issues

This patch series fixes a couple of issues with HW TSO offload.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Sep 1, 2016
2 parents a036244 + 7ceb8a1 commit d3ebc88
Showing 4 changed files with 81 additions and 15 deletions.
1 change: 1 addition & 0 deletions drivers/net/ethernet/cavium/thunder/nic.h
@@ -279,6 +279,7 @@ struct nicvf {
u8 sqs_id;
bool sqs_mode;
bool hw_tso;
+bool t88;

/* Receive buffer alloc */
u32 rb_page_offset;
11 changes: 8 additions & 3 deletions drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -251,9 +251,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
int lmac;
u64 lmac_cfg;

-/* Max value that can be set is 60 */
-if (size > 60)
-size = 60;
+/* There is an issue in HW wherein, while sending GSO sized
+ * pkts as part of TSO, if the pkt len falls below this size the
+ * NIC will zero pad the packet and also update the IP total length.
+ * Hence set this value to less than the min pkt size of MAC+IP+TCP
+ * headers; BGX will do the padding to transmit a 64 byte pkt.
+ */
+if (size > 52)
+size = 52;

for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
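For intuition, the new cap can be sanity-checked with plain arithmetic. A minimal user-space sketch, assuming standard untagged Ethernet/IPv4/TCP minimum header sizes (these constants are illustrative, not driver values):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        int eth_hlen = 14;      /* Ethernet header, untagged */
        int ip_hlen = 20;       /* IPv4 header, no options */
        int tcp_hlen = 20;      /* TCP header, no options */
        int min_hdrs = eth_hlen + ip_hlen + tcp_hlen;  /* = 54 bytes */
        int pad_threshold = 52; /* the new NIC-level cap */

        /* The NIC pad threshold must sit below a header-only TSO
         * segment, or the HW zero-pads it and rewrites the IP total
         * length; shorter frames are instead padded to 64 bytes by
         * the BGX block after the IP length is final.
         */
        assert(pad_threshold < min_hdrs);
        printf("min MAC+IP+TCP = %d, pad threshold = %d\n",
               min_hdrs, pad_threshold);
        return 0;
    }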
20 changes: 15 additions & 5 deletions drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -513,6 +513,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct nicvf *nic = netdev_priv(netdev);
struct snd_queue *sq;
struct sq_hdr_subdesc *hdr;
+struct sq_hdr_subdesc *tso_sqe;

sq = &nic->qs->sq[cqe_tx->sq_idx];

@@ -527,17 +528,21 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,

nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
-/* For TSO offloaded packets only one SQE will have a valid SKB */
if (skb) {
+/* Check for dummy descriptor used for HW TSO offload on 88xx */
+if (hdr->dont_send) {
+/* Get actual TSO descriptors and free them */
+tso_sqe =
+(struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+}
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
prefetch(skb);
dev_consume_skb_any(skb);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
-/* In case of HW TSO, HW sends a CQE for each segment of a TSO
- * packet instead of a single CQE for the whole TSO packet
- * transmitted. Each of this CQE points to the same SQE, so
- * avoid freeing same SQE multiple times.
+/* In case of SW TSO on 88xx, only the last segment will have
+ * an SKB attached, so just free SQEs here.
 */
if (!nic->hw_tso)
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
@@ -1502,6 +1507,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct nicvf *nic;
int err, qcount;
+u16 sdevid;

err = pci_enable_device(pdev);
if (err) {
@@ -1575,6 +1581,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pass1_silicon(nic->pdev))
nic->hw_tso = true;

+pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+if (sdevid == 0xA134)
+nic->t88 = true;

/* Check if this VF is in QS only mode */
if (nic->sqs_mode)
return 0;
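To make the cleanup path above easier to follow, here is a compressed user-space sketch. The struct and ring are toy stand-ins (the real sq_hdr_subdesc is a hardware bitfield layout); only the freeing arithmetic mirrors nicvf_snd_pkt_handler():

    #include <stdio.h>

    /* Toy stand-in for sq_hdr_subdesc; only the fields the cleanup
     * path reads are modeled here.
     */
    struct hdr_subdesc {
        unsigned dont_send;   /* 1 on the dummy descriptor */
        unsigned subdesc_cnt; /* subdescriptors following this one */
        unsigned rsvd2;       /* ring index of the real TSO HDR */
    };

    static struct hdr_subdesc ring[256]; /* toy send-queue ring */
    static unsigned freed;               /* SQ entries released */

    static void put_sq_desc(unsigned cnt) { freed += cnt; }

    /* Mirrors the skb branch of nicvf_snd_pkt_handler(): a CQE for
     * a dummy descriptor leads back, via rsvd2, to the real chain.
     */
    static void complete_tx(struct hdr_subdesc *hdr)
    {
        if (hdr->dont_send)
            put_sq_desc(ring[hdr->rsvd2].subdesc_cnt + 1); /* TSO chain */
        put_sq_desc(hdr->subdesc_cnt + 1); /* dummy HDR + IMMEDIATE */
    }

    int main(void)
    {
        /* Real packet at index 0: HDR plus 4 gathers (subdesc_cnt = 4). */
        ring[0] = (struct hdr_subdesc){ 0, 4, 0 };
        /* Dummy pair at index 5, pointing back at index 0. */
        ring[5] = (struct hdr_subdesc){ 1, 1, 0 };
        complete_tx(&ring[5]);
        printf("freed %u SQ entries\n", freed); /* 5 + 2 = 7 */
        return 0;
    }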
64 changes: 57 additions & 7 deletions drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -938,6 +938,8 @@ static int nicvf_tso_count_subdescs(struct sk_buff *skb)
return num_edescs + sh->gso_segs;
}

+#define POST_CQE_DESC_COUNT 2

/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
@@ -948,6 +950,10 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
return subdesc_cnt;
}

+/* Dummy descriptors to get TSO pkt completion notification */
+if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
+subdesc_cnt += POST_CQE_DESC_COUNT;

if (skb_shinfo(skb)->nr_frags)
subdesc_cnt += skb_shinfo(skb)->nr_frags;
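The resulting descriptor budget is easy to check by hand. A sketch of the arithmetic, assuming MIN_SQ_DESC_PER_PKT_XMIT is the driver's usual two entries (HDR plus one GATHER); that constant is an assumption here, not shown in this diff:

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_SQ_DESC_PER_PKT_XMIT 2 /* HDR + one GATHER (assumed) */
    #define POST_CQE_DESC_COUNT      2 /* dummy HDR + IMMEDIATE pair */

    /* Mirrors nicvf_sq_subdesc_required() on the HW TSO path. */
    static int subdescs_required(bool t88, bool hw_tso,
                                 unsigned gso_size, int nr_frags)
    {
        int cnt = MIN_SQ_DESC_PER_PKT_XMIT;

        /* On 88xx, each HW TSO packet reserves two extra entries
         * for the dummy completion descriptors appended after it.
         */
        if (t88 && hw_tso && gso_size)
            cnt += POST_CQE_DESC_COUNT;

        return cnt + nr_frags;
    }

    int main(void)
    {
        /* A 3-frag TSO skb on a t88 with HW TSO: 2 + 2 + 3 = 7. */
        printf("%d\n", subdescs_required(true, true, 1448, 3));
        return 0;
    }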

Expand All @@ -965,14 +971,21 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
struct sq_hdr_subdesc *hdr;

hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
-sq->skbuff[qentry] = (u64)skb;

memset(hdr, 0, SND_QUEUE_DESC_SIZE);
hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
-/* Enable notification via CQE after processing SQE */
-hdr->post_cqe = 1;
-/* No of subdescriptors following this */
-hdr->subdesc_cnt = subdesc_cnt;

+if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
+/* post_cqe = 0, to avoid HW posting a CQE for every TSO
+ * segment transmitted on 88xx.
+ */
+hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
+} else {
+sq->skbuff[qentry] = (u64)skb;
+/* Enable notification via CQE after processing SQE */
+hdr->post_cqe = 1;
+/* No of subdescriptors following this */
+hdr->subdesc_cnt = subdesc_cnt;
+}
hdr->tot_len = len;

/* Offload checksum calculation to HW */
@@ -1023,6 +1036,37 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
gather->addr = data;
}

+/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
+ * packet so that a CQE is posted as a notification for transmission of
+ * a TSO packet.
+ */
+static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
+int tso_sqe, struct sk_buff *skb)
+{
+struct sq_imm_subdesc *imm;
+struct sq_hdr_subdesc *hdr;
+
+sq->skbuff[qentry] = (u64)skb;
+
+hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+/* Enable notification via CQE after processing SQE */
+hdr->post_cqe = 1;
+/* There is no packet to transmit here */
+hdr->dont_send = 1;
+hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
+hdr->tot_len = 1;
+/* Actual TSO header SQE index, needed for cleanup */
+hdr->rsvd2 = tso_sqe;
+
+qentry = nicvf_get_nxt_sqentry(sq, qentry);
+imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
+memset(imm, 0, SND_QUEUE_DESC_SIZE);
+imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
+imm->len = 1;
+}

/* Segment a TSO packet into 'gso_size' segments and append
* them to SQ for transfer
*/
@@ -1096,7 +1140,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
int i, size;
-int subdesc_cnt;
+int subdesc_cnt, tso_sqe = 0;
int sq_num, qentry;
struct queue_set *qs;
struct snd_queue *sq;
@@ -1131,6 +1175,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
/* Add SQ header subdesc */
nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
skb, skb->len);
+tso_sqe = qentry;

/* Add SQ gather subdescs */
qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1154,6 +1199,11 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
}

doorbell:
+if (nic->t88 && skb_shinfo(skb)->gso_size) {
+qentry = nicvf_get_nxt_sqentry(sq, qentry);
+nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
+}

/* make sure all memory stores are done before ringing doorbell */
smp_wmb();

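Taken together, the hunks arrange the send queue for one HW TSO packet on 88xx roughly as below (an illustrative layout; actual indices and gather counts depend on the skb):

    HDR        post_cqe=0                    <- tso_sqe (real packet header)
    GATHER     linear skb data
    GATHER     one per page fragment
    ...
    HDR        dont_send=1, post_cqe=1,
               rsvd2=tso_sqe, tot_len=1      <- skb pointer stored here
    IMMEDIATE  len=1

Only the trailing dummy HDR posts a CQE, so the host sees exactly one completion per TSO packet, and its rsvd2 back-pointer lets nicvf_snd_pkt_handler() free the real descriptors together with the dummy pair.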
