bna: Enable Multi Buffer RX
The CT2 HW supports multi-buffer Rx. This patch provides the necessary changes
for bnad to use the multi-buffer Rx feature. For bnad, multi-buffer Rx is
enabled by default when the MTU is > 4096; in that case the q0 data/large
buffers are 2048 bytes. As the resource requirements of multi-buffer Rx are
different, a new Rx needs to be created to use this feature. The ASIC posts
multiple completions if a frame exceeds the buffer size; the last completion
is marked with the EOP flag.
 - Separate HQ and DQ enums for resource allocations and configurations.
 - rx_config and rxq structure changes to pass the correct info from bnad.
 - DQ depth need not be the same as HQ depth, so CQ depth is adjusted accordingly.
 - Rx CFG frame size is taken from the configured MTU.
 - Rx q0 buffer size is configured from bnad's rx_config when multi-buffer is
   enabled.
 - Poll for entire frame completion.
 - Once the EOP completion is received, gather the number of vectors used by the
   frame and submit it to the stack (see the sketch after this list).
 - Changed MTU to frame size wherever necessary.
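
The EOP-based completion handling referenced above lives in the bnad fast path
(in the bnad.c/bnad.h changes, which are not expanded on this page). The
following is only an illustrative sketch of that logic, not the committed code;
the struct layout and the name poll_one_frame() are hypothetical stand-ins:

/*
 * Sketch only: walk the CQ entries that belong to one frame until the
 * completion carrying the EOP flag is seen, counting how many buffer
 * vectors the frame consumed.  The struct below is a hypothetical
 * stand-in for the driver's CQ entry type.
 */
#include <linux/types.h>

#define CQ_EF_EOP	(1U << 31)	/* mirrors BNA_CQ_EF_EOP */

struct cq_entry {
	u32	flags;	/* completion flags; CQ_EF_EOP is set on the last one */
	u32	valid;	/* non-zero once the ASIC has written the entry */
};

/* Returns the number of vectors in the frame, or 0 if it is incomplete. */
static u32 poll_one_frame(struct cq_entry *cq, u32 ci, u32 q_depth)
{
	u32 vecs = 0;

	while (cq[ci].valid) {
		vecs++;
		if (cq[ci].flags & CQ_EF_EOP)
			return vecs;	/* submit these vectors to the stack */
		ci = (ci + 1) & (q_depth - 1);	/* q_depth is a power of two */
	}
	return 0;	/* EOP not seen yet; retry on the next poll */
}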

Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Rasesh Mody authored and David S. Miller committed Dec 18, 2013
1 parent fe1624c commit e29aa33
Showing 6 changed files with 343 additions and 125 deletions.
3 changes: 2 additions & 1 deletion drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -472,7 +472,8 @@ enum bfi_enet_hds_type {

struct bfi_enet_rx_cfg {
u8 rxq_type;
u8 rsvd[3];
u8 rsvd[1];
u16 frame_size;

struct {
u8 max_header_size;
4 changes: 4 additions & 0 deletions drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -322,6 +322,10 @@ do { \
#define BNA_CQ_EF_REMOTE (1 << 19)

#define BNA_CQ_EF_LOCAL (1 << 20)
/* CAT2 ASIC does not use bit 21 as per the SPEC.
* Bit 31 is set in every end of frame completion
*/
#define BNA_CQ_EF_EOP (1 << 31)

/* Data structures */

66 changes: 46 additions & 20 deletions drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -1811,6 +1811,7 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
cfg_req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));

cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
cfg_req->num_queue_sets = rx->num_paths;
for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
i < rx->num_paths;
@@ -1832,8 +1833,17 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
/* Large/Single RxQ */
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
&q0->qpt);
q0->buffer_size =
bna_enet_mtu_get(&rx->bna->enet);
if (q0->multi_buffer)
/* multi-buffer is enabled by allocating
* a new rx with new set of resources.
* q0->buffer_size should be initialized to
* fragment size.
*/
cfg_req->rx_cfg.multi_buffer =
BNA_STATUS_T_ENABLED;
else
q0->buffer_size =
bna_enet_mtu_get(&rx->bna->enet);
cfg_req->q_cfg[i].ql.rx_buffer_size =
htons((u16)q0->buffer_size);
break;
@@ -2383,8 +2393,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
u32 hq_depth;
u32 dq_depth;

dq_depth = q_cfg->q_depth;
hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
dq_depth = q_cfg->q0_depth;
hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
cq_depth = dq_depth + hq_depth;

BNA_TO_POWER_OF_2_HIGH(cq_depth);
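
BNA_TO_POWER_OF_2_HIGH() itself is not shown in this diff; as used here it
presumably rounds its argument up to the next power of two, so the CQ depth
stays a power of two even though q0_depth + q1_depth no longer has to be.
A minimal equivalent, under that assumption:

/* Sketch of the assumed round-up-to-power-of-two behaviour (u32 from <linux/types.h>). */
static inline u32 roundup_pow2_sketch(u32 depth)
{
	u32 p = 1;

	while (p < depth)
		p <<= 1;
	return p;
}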
@@ -2501,10 +2511,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_rxq *q0;
struct bna_rxq *q1;
struct bna_intr_info *intr_info;
u32 page_count;
struct bna_mem_descr *hqunmap_mem;
struct bna_mem_descr *dqunmap_mem;
struct bna_mem_descr *ccb_mem;
struct bna_mem_descr *rcb_mem;
struct bna_mem_descr *unmapq_mem;
struct bna_mem_descr *cqpt_mem;
struct bna_mem_descr *cswqpt_mem;
struct bna_mem_descr *cpage_mem;
@@ -2514,16 +2524,19 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_mem_descr *dsqpt_mem;
struct bna_mem_descr *hpage_mem;
struct bna_mem_descr *dpage_mem;
int i;
int dpage_count, hpage_count, rcb_idx;
u32 dpage_count, hpage_count;
u32 hq_idx, dq_idx, rcb_idx;
u32 cq_depth, i;
u32 page_count;

if (!bna_rx_res_check(rx_mod, rx_cfg))
return NULL;

intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
@@ -2575,7 +2588,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
}

rx->num_paths = rx_cfg->num_paths;
for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
i < rx->num_paths; i++) {
rxp = bna_rxp_get(rx_mod);
list_add_tail(&rxp->qe, &rx->rxp_q);
rxp->type = rx_cfg->rxp_type;
@@ -2618,9 +2632,13 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
q0->rxp = rxp;

q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
rcb_idx++;
q0->rcb->q_depth = rx_cfg->q_depth;
q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
rcb_idx++; dq_idx++;
q0->rcb->q_depth = rx_cfg->q0_depth;
q0->q_depth = rx_cfg->q0_depth;
q0->multi_buffer = rx_cfg->q0_multi_buf;
q0->buffer_size = rx_cfg->q0_buf_size;
q0->num_vecs = rx_cfg->q0_num_vecs;
q0->rcb->rxq = q0;
q0->rcb->bnad = bna->bnad;
q0->rcb->id = 0;
@@ -2640,15 +2658,18 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
q1->rxp = rxp;

q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
rcb_idx++;
q1->rcb->q_depth = rx_cfg->q_depth;
q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
rcb_idx++; hq_idx++;
q1->rcb->q_depth = rx_cfg->q1_depth;
q1->q_depth = rx_cfg->q1_depth;
q1->multi_buffer = BNA_STATUS_T_DISABLED;
q1->num_vecs = 1;
q1->rcb->rxq = q1;
q1->rcb->bnad = bna->bnad;
q1->rcb->id = 1;
q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
rx_cfg->hds_config.forced_offset
: rx_cfg->small_buff_size;
: rx_cfg->q1_buf_size;
q1->rx_packets = q1->rx_bytes = 0;
q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;

@@ -2663,9 +2684,14 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
/* Setup CQ */

rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
rxp->cq.ccb->q_depth = rx_cfg->q_depth +
((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
0 : rx_cfg->q_depth);
cq_depth = rx_cfg->q0_depth +
((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
0 : rx_cfg->q1_depth);
/* if multi-buffer is enabled sum of q0_depth
* and q1_depth need not be a power of 2
*/
BNA_TO_POWER_OF_2_HIGH(cq_depth);
rxp->cq.ccb->q_depth = cq_depth;
rxp->cq.ccb->cq = &rxp->cq;
rxp->cq.ccb->rcb[0] = q0->rcb;
q0->rcb->ccb = rxp->cq.ccb;
49 changes: 31 additions & 18 deletions drivers/net/ethernet/brocade/bna/bna_types.h
@@ -109,20 +109,21 @@ enum bna_tx_res_req_type {
enum bna_rx_mem_type {
BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */
BNA_RX_RES_MEM_T_RCB = 1, /* CQ context */
BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */
BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */
BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */
BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */
BNA_RX_RES_MEM_T_HQPT = 6, /* RX QPT */
BNA_RX_RES_MEM_T_DQPT = 7, /* RX QPT */
BNA_RX_RES_MEM_T_HSWQPT = 8, /* RX s/w QPT */
BNA_RX_RES_MEM_T_DSWQPT = 9, /* RX s/w QPT */
BNA_RX_RES_MEM_T_DPAGE = 10, /* RX s/w QPT */
BNA_RX_RES_MEM_T_HPAGE = 11, /* RX s/w QPT */
BNA_RX_RES_MEM_T_IBIDX = 12,
BNA_RX_RES_MEM_T_RIT = 13,
BNA_RX_RES_T_INTR = 14, /* Rx interrupts */
BNA_RX_RES_T_MAX = 15
BNA_RX_RES_MEM_T_UNMAPHQ = 2,
BNA_RX_RES_MEM_T_UNMAPDQ = 3,
BNA_RX_RES_MEM_T_CQPT = 4,
BNA_RX_RES_MEM_T_CSWQPT = 5,
BNA_RX_RES_MEM_T_CQPT_PAGE = 6,
BNA_RX_RES_MEM_T_HQPT = 7,
BNA_RX_RES_MEM_T_DQPT = 8,
BNA_RX_RES_MEM_T_HSWQPT = 9,
BNA_RX_RES_MEM_T_DSWQPT = 10,
BNA_RX_RES_MEM_T_DPAGE = 11,
BNA_RX_RES_MEM_T_HPAGE = 12,
BNA_RX_RES_MEM_T_IBIDX = 13,
BNA_RX_RES_MEM_T_RIT = 14,
BNA_RX_RES_T_INTR = 15,
BNA_RX_RES_T_MAX = 16
};

enum bna_tx_type {
@@ -583,6 +584,8 @@ struct bna_rxq {

int buffer_size;
int q_depth;
u32 num_vecs;
enum bna_status multi_buffer;

struct bna_qpt qpt;
struct bna_rcb *rcb;
@@ -632,6 +635,8 @@ struct bna_ccb {
struct bna_rcb *rcb[2];
void *ctrl; /* For bnad */
struct bna_pkt_rate pkt_rate;
u32 pkts_una;
u32 bytes_per_intr;

/* Control path */
struct bna_cq *cq;
@@ -671,14 +676,22 @@ struct bna_rx_config {
int num_paths;
enum bna_rxp_type rxp_type;
int paused;
int q_depth;
int coalescing_timeo;
/*
* Small/Large (or Header/Data) buffer size to be configured
* for SLR and HDS queue type. Large buffer size comes from
* enet->mtu.
* for SLR and HDS queue type.
*/
int small_buff_size;
u32 frame_size;

/* header or small queue */
u32 q1_depth;
u32 q1_buf_size;

/* data or large queue */
u32 q0_depth;
u32 q0_buf_size;
u32 q0_num_vecs;
enum bna_status q0_multi_buf;

enum bna_status rss_status;
struct bna_rss_config rss_config;
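
The bnad-side code that fills these new fields is in the driver files not
expanded on this page. Purely as an illustration of how they fit together, a
hedged sketch follows; the 4096-byte threshold and the 2048-byte q0 buffer
size come from the commit message, while the helper name and the depth
scaling are assumptions:

/* Sketch only: populate the new multi-buffer fields of bna_rx_config. */
static void rx_cfg_fill_sketch(struct bna_rx_config *cfg, u32 mtu, u32 q_depth)
{
	int multi_buf = (mtu > 4096);	/* multi-buffer Rx default for MTU > 4096 */

	cfg->frame_size = mtu;		/* Rx CFG frame size from the configured MTU */
	cfg->q0_multi_buf = multi_buf ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
	cfg->q0_buf_size = multi_buf ? 2048 : mtu;	/* 2048-byte data buffers */
	/* vectors per frame = data buffers needed to hold one full frame */
	cfg->q0_num_vecs = multi_buf ?
		DIV_ROUND_UP(cfg->frame_size, cfg->q0_buf_size) : 1;
	/* assumption: DQ depth scales with vectors, HQ depth does not */
	cfg->q0_depth = q_depth * cfg->q0_num_vecs;
	cfg->q1_depth = q_depth;
}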