Commit cdcb7ff

---
r: 290219
b: refs/heads/master
c: 621b4d6
h: refs/heads/master
i:
  290217: 8fb115f
  290215: d103bbb
v: v3
Dmitry Kravkov authored and David S. Miller committed Feb 21, 2012
1 parent c6fb018 commit cdcb7ff
Showing 9 changed files with 178 additions and 83 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 2b68c18194b078489a84023564bcf7464b6c7b37
refs/heads/master: 621b4d66b27e70ba9a0e8fa4676d9c4f916c8343
12 changes: 12 additions & 0 deletions trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -445,6 +445,8 @@ struct bnx2x_agg_info {
u16 vlan_tag;
u16 len_on_bd;
u32 rxhash;
u16 gro_size;
u16 full_page;
};

#define Q_STATS_OFFSET32(stat_name) \
@@ -473,6 +475,11 @@ struct bnx2x_fp_txdata {
int txq_index;
};

enum bnx2x_tpa_mode_t {
TPA_MODE_LRO,
TPA_MODE_GRO
};

struct bnx2x_fastpath {
struct bnx2x *bp; /* parent */

@@ -489,6 +496,8 @@ struct bnx2x_fastpath {

dma_addr_t status_blk_mapping;

enum bnx2x_tpa_mode_t mode;

u8 max_cos; /* actual number of active tx coses */
struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS];

@@ -1199,6 +1208,8 @@ struct bnx2x {
#define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
/* TCP with Timestamp Option (32) + IPv6 (40) */
#define ETH_MAX_TPA_HEADER_SIZE 72

/* Max supported alignment is 256 (8 shift) */
#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT)
@@ -1269,6 +1280,7 @@ struct bnx2x {
#define NO_MCP_FLAG (1 << 9)

#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
#define GRO_ENABLE_FLAG (1 << 10)
#define MF_FUNC_DIS (1 << 11)
#define OWN_CNIC_IRQ (1 << 12)
#define NO_ISCSI_OOO_FLAG (1 << 13)
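A note on the header changes above: ETH_MAX_TPA_HEADER_SIZE budgets for the largest L3/L4 headers an aggregated frame can carry, 40 bytes of fixed IPv6 header plus 32 bytes of TCP with the timestamp option, exactly as the in-line comment says. A minimal standalone check of that arithmetic (the macro names below are illustrative, not from the driver):

#include <assert.h>

#define IPV6_HDR_LEN     40 /* fixed IPv6 header */
#define TCP_BASE_HDR_LEN 20 /* TCP header without options */
#define TCP_TS_OPT_LEN   12 /* timestamp option, padded to a 4-byte boundary */

int main(void)
{
	assert(IPV6_HDR_LEN + TCP_BASE_HDR_LEN + TCP_TS_OPT_LEN == 72);
	return 0;
}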
149 changes: 103 additions & 46 deletions trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -209,13 +209,11 @@ static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
fp->last_max_sge = idx;
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
struct eth_fast_path_rx_cqe *fp_cqe)
static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
u16 sge_len,
struct eth_end_agg_rx_cqe *cqe)
{
struct bnx2x *bp = fp->bp;
u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
le16_to_cpu(fp_cqe->len_on_bd)) >>
SGE_PAGE_SHIFT;
u16 last_max, last_elem, first_elem;
u16 delta = 0;
u16 i;
@@ -226,15 +224,15 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
/* First mark all used pages */
for (i = 0; i < sge_len; i++)
BIT_VEC64_CLEAR_BIT(fp->sge_mask,
RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

/* Here we assume that the last SGE index is the biggest */
prefetch((void *)(fp->sge_mask));
bnx2x_update_last_max_sge(fp,
le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

last_max = RX_SGE(fp->last_max_sge);
last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
@@ -328,6 +326,12 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
if (fp->mode == TPA_MODE_GRO) {
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
tpa_info->full_page =
SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
tpa_info->gro_size = gro_size;
}

#ifdef BNX2X_STOP_ON_ERROR
fp->tpa_queue_used |= (1 << queue);
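The full_page value computed in the hunk above rounds the SGE buffer size down to a whole multiple of the firmware-reported segment size, so each buffer carries only complete MSS-sized chunks. A worked sketch with assumed sizes (4K pages, 2 pages per SGE, 1448-byte MSS; not the driver's literal constants):

#include <stdio.h>

int main(void)
{
	unsigned int sge_buf  = 4096 * 2; /* SGE_PAGE_SIZE * PAGES_PER_SGE (assumed) */
	unsigned int gro_size = 1448;     /* MSS from pkt_len_or_gro_seg_len */

	/* integer division truncates: largest multiple of gro_size per buffer */
	unsigned int full_page = sge_buf / gro_size * gro_size;

	/* the end-of-aggregation path later derives the SGE count the same
	 * way: pages = ceil(frag_size / full_page) in GRO mode */
	unsigned int frag_size = 26 * 1448; /* example payload beyond len_on_bd */
	unsigned int pages = (frag_size + full_page - 1) / full_page;

	printf("full_page=%u pages=%u\n", full_page, pages); /* 7240, 6 */
	return 0;
}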
@@ -384,25 +388,40 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
u16 queue, struct sk_buff *skb,
struct bnx2x_agg_info *tpa_info,
u16 pages,
struct sk_buff *skb,
struct eth_end_agg_rx_cqe *cqe,
u16 cqe_idx)
{
struct sw_rx_page *rx_pg, old_rx_pg;
u32 i, frag_len, frag_size, pages;
int err;
int j;
struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
u32 i, frag_len, frag_size;
int err, j, frag_id = 0;
u16 len_on_bd = tpa_info->len_on_bd;
u16 full_page = 0, gro_size = 0;

frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

if (fp->mode == TPA_MODE_GRO) {
gro_size = tpa_info->gro_size;
full_page = tpa_info->full_page;
}

/* This is needed in order to enable forwarding support */
if (frag_size)
if (frag_size) {
skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
tpa_info->parsing_flags, len_on_bd);

/* set for GRO */
if (fp->mode == TPA_MODE_GRO)
skb_shinfo(skb)->gso_type =
(GET_FLAG(tpa_info->parsing_flags,
PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
PRS_FLAG_OVERETH_IPV6) ?
SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
}


#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
@@ -419,7 +438,12 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,

/* FW gives the indices of the SGE as if the ring is an array
(meaning that "next" element will consume 2 indices) */
frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
if (fp->mode == TPA_MODE_GRO)
frag_len = min_t(u32, frag_size, (u32)full_page);
else /* LRO */
frag_len = min_t(u32, frag_size,
(u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));

rx_pg = &fp->rx_page_ring[sge_idx];
old_rx_pg = *rx_pg;

@@ -435,9 +459,21 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(&old_rx_pg, mapping),
SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

/* Add one frag and update the appropriate fields in the skb */
skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
if (fp->mode == TPA_MODE_LRO)
skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
else { /* GRO */
int rem;
int offset = 0;
for (rem = frag_len; rem > 0; rem -= gro_size) {
int len = rem > gro_size ? gro_size : rem;
skb_fill_page_desc(skb, frag_id++,
old_rx_pg.page, offset, len);
if (offset)
get_page(old_rx_pg.page);
offset += len;
}
}

skb->data_len += frag_len;
skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
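The GRO branch above is the heart of the change: instead of attaching the whole SGE buffer as a single skb frag (as LRO does), it slices the buffer into gro_size pieces so the stack sees MSS-aligned frags, and it takes one extra page reference per additional frag because each frag drops its own reference when the skb is freed. A standalone restatement of that loop, assuming the declarations from <linux/skbuff.h>:

/* split one page's worth of aggregated payload into MSS-sized skb frags;
 * returns the next free frag index */
static int split_page_into_frags(struct sk_buff *skb, struct page *page,
				 int frag_id, int frag_len, int gro_size)
{
	int offset = 0, rem;

	for (rem = frag_len; rem > 0; rem -= gro_size) {
		int len = rem > gro_size ? gro_size : rem;

		skb_fill_page_desc(skb, frag_id++, page, offset, len);
		if (offset)
			get_page(page); /* one reference per extra frag */
		offset += len;
	}
	return frag_id;
}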
@@ -449,18 +485,17 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
u16 queue, struct eth_end_agg_rx_cqe *cqe,
u16 cqe_idx)
static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct bnx2x_agg_info *tpa_info,
u16 pages,
struct eth_end_agg_rx_cqe *cqe,
u16 cqe_idx)
{
struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
u32 pad = tpa_info->placement_offset;
u8 pad = tpa_info->placement_offset;
u16 len = tpa_info->len_on_bd;
struct sk_buff *skb = NULL;
u8 *data = rx_buf->data;
/* alloc new skb */
u8 *new_data;
u8 *new_data, *data = rx_buf->data;
u8 old_tpa_state = tpa_info->tpa_state;

tpa_info->tpa_state = BNX2X_TPA_STOP;
Expand Down Expand Up @@ -500,7 +535,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;

if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
skb, cqe, cqe_idx)) {
if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
napi_gro_receive(&fp->napi, skb);
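Both TPA modes hand the rebuilt skb to napi_gro_receive() here; what differs is the metadata set earlier in bnx2x_fill_frag_skb(). So that forwarded traffic can be resegmented, gso_size carries the firmware-reported MSS, and GRO mode additionally derives gso_type from the parsing flags. A minimal sketch of that selection (is_ipv6 stands in for the PRS_FLAG_OVERETH_IPV6 test; kernel context assumed):

static void set_gso_fields(struct sk_buff *skb, u16 mss, bool is_ipv6)
{
	skb_shinfo(skb)->gso_size = mss;
	skb_shinfo(skb)->gso_type = is_ipv6 ? SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
}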
@@ -565,7 +601,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
struct eth_fast_path_rx_cqe *cqe_fp;
u8 cqe_fp_flags;
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad;
u16 len, pad, queue;
u8 *data;

#ifdef BNX2X_STOP_ON_ERROR
@@ -586,17 +622,21 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
" queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
cqe_fp_flags, cqe_fp->status_flags,
le32_to_cpu(cqe_fp->rss_hash_result),
le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
le16_to_cpu(cqe_fp->vlan_tag),
le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

/* is this a slowpath msg? */
if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
bnx2x_sp_event(fp, cqe);
goto next_cqe;
}

rx_buf = &fp->rx_buf_ring[bd_cons];
data = rx_buf->data;

if (!CQE_TYPE_FAST(cqe_fp_type)) {
struct bnx2x_agg_info *tpa_info;
u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
/* sanity check */
if (fp->disable_tpa &&
@@ -616,28 +656,38 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
bnx2x_tpa_start(fp, queue,
bd_cons, bd_prod,
cqe_fp);

goto next_rx;
} else {
u16 queue =
cqe->end_agg_cqe.queue_index;
DP(NETIF_MSG_RX_STATUS,
"calling tpa_stop on queue %d\n",
queue);

bnx2x_tpa_stop(bp, fp, queue,
&cqe->end_agg_cqe,
comp_ring_cons);
}
queue = cqe->end_agg_cqe.queue_index;
tpa_info = &fp->tpa_info[queue];
DP(NETIF_MSG_RX_STATUS,
"calling tpa_stop on queue %d\n",
queue);

frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
tpa_info->len_on_bd;

if (fp->mode == TPA_MODE_GRO)
pages = (frag_size + tpa_info->full_page - 1) /
tpa_info->full_page;
else
pages = SGE_PAGE_ALIGN(frag_size) >>
SGE_PAGE_SHIFT;

bnx2x_tpa_stop(bp, fp, tpa_info, pages,
&cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
if (bp->panic)
return 0;
if (bp->panic)
return 0;
#endif

bnx2x_update_sge_prod(fp, cqe_fp);
goto next_cqe;
}
bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
goto next_cqe;
}
/* non TPA */
len = le16_to_cpu(cqe_fp->pkt_len);
len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
pad = cqe_fp->placement_offset;
dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
@@ -3440,13 +3490,15 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
}

netdev_features_t bnx2x_fix_features(struct net_device *dev,
netdev_features_t features)
netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);

/* TPA requires Rx CSUM offloading */
if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
features &= ~NETIF_F_LRO;
features &= ~NETIF_F_GRO;
}

return features;
}
@@ -3462,6 +3514,11 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
else
flags &= ~TPA_ENABLE_FLAG;

if (features & NETIF_F_GRO)
flags |= GRO_ENABLE_FLAG;
else
flags &= ~GRO_ENABLE_FLAG;

if (features & NETIF_F_LOOPBACK) {
if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
bp->link_params.loopback_mode = LOOPBACK_BMAC;
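The two ethtool hooks above wire NETIF_F_GRO into the driver: bnx2x_fix_features() drops both LRO and GRO whenever RX checksum offload is off (TPA depends on it), and bnx2x_set_features() mirrors the result into bp->flags as GRO_ENABLE_FLAG. A toy model of the masking rule (the bit values are illustrative, not the real NETIF_F_* ones):

#include <stdbool.h>
#include <stdio.h>

#define F_RXCSUM (1u << 0)
#define F_LRO    (1u << 1)
#define F_GRO    (1u << 2)

/* mirrors bnx2x_fix_features(): TPA (LRO or GRO) requires Rx csum */
static unsigned int fix_features(unsigned int features, bool disable_tpa)
{
	if (!(features & F_RXCSUM) || disable_tpa)
		features &= ~(F_LRO | F_GRO);
	return features;
}

int main(void)
{
	/* clearing RXCSUM silently clears LRO and GRO as well */
	printf("0x%x\n", fix_features(F_LRO | F_GRO, false)); /* prints 0x0 */
	return 0;
}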
24 changes: 22 additions & 2 deletions trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -534,8 +534,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
*/
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif

netdev_features_t bnx2x_fix_features(struct net_device *dev,
netdev_features_t features);
netdev_features_t features);
int bnx2x_set_features(struct net_device *dev, netdev_features_t features);

/**
@@ -1491,6 +1492,18 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
return max_cfg;
}

/* checks if HW supports GRO for given MTU */
static inline bool bnx2x_mtu_allows_gro(int mtu)
{
/* gro frags per page */
int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

/*
* 1. number of frags should not grow above MAX_SKB_FRAGS
* 2. frag must fit the page
*/
return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}
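bnx2x_mtu_allows_gro() above deserves a worked example: a smaller MTU means more MSS-sized frags per SGE page, so counterintuitively it is small MTUs that fail the MAX_SKB_FRAGS bound. The constants below are assumptions for a 4K-page build (U_ETH_SGL_SIZE of 8, MAX_SKB_FRAGS of 17), not values taken from this commit:

#include <stdbool.h>
#include <stdio.h>

#define SGE_PAGE_SIZE           4096
#define ETH_MAX_TPA_HEADER_SIZE 72
#define U_ETH_SGL_SIZE          8
#define MAX_SKB_FRAGS           17

static bool mtu_allows_gro(int mtu)
{
	int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

	return mtu <= SGE_PAGE_SIZE && U_ETH_SGL_SIZE * fpp <= MAX_SKB_FRAGS;
}

int main(void)
{
	/* 4096 / (1500 - 72) = 2 frags per page; 8 * 2 = 16 <= 17 -> true */
	printf("mtu 1500: %d\n", mtu_allows_gro(1500));
	/* 4096 / (1000 - 72) = 4 frags per page; 8 * 4 = 32 > 17 -> false */
	printf("mtu 1000: %d\n", mtu_allows_gro(1000));
	return 0;
}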
/**
* bnx2x_bz_fp - zero content of the fastpath structure.
*
@@ -1556,7 +1569,14 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
* set the tpa flag for each queue. The tpa flag determines the queue
* minimal size so it must be set prior to queue memory allocation
*/
fp->disable_tpa = (bp->flags & TPA_ENABLE_FLAG) == 0;
fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
(bp->flags & GRO_ENABLE_FLAG &&
bnx2x_mtu_allows_gro(bp->dev->mtu)));
if (bp->flags & TPA_ENABLE_FLAG)
fp->mode = TPA_MODE_LRO;
else if (bp->flags & GRO_ENABLE_FLAG)
fp->mode = TPA_MODE_GRO;

#ifdef BCM_CNIC
/* We don't want TPA on an FCoE L2 ring */
if (IS_FCOE_FP(fp))
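The selection in bnx2x_bz_fp() above reduces to a small decision: LRO (the TPA_ENABLE_FLAG path) takes precedence, GRO is only effective when the MTU check passes, and otherwise TPA stays disabled for the queue. A compact restatement of that behavior (hedged; not the driver's literal code):

#include <stdbool.h>

enum tpa_mode { TPA_OFF, TPA_LRO, TPA_GRO };

static enum tpa_mode pick_tpa_mode(bool lro_on, bool gro_on, bool mtu_ok)
{
	if (lro_on)
		return TPA_LRO;  /* TPA_ENABLE_FLAG wins */
	if (gro_on && mtu_ok)
		return TPA_GRO;  /* GRO_ENABLE_FLAG + bnx2x_mtu_allows_gro() */
	return TPA_OFF;          /* fp->disable_tpa stays set */
}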
2 changes: 1 addition & 1 deletion trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1931,7 +1931,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
goto test_loopback_rx_exit;

len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
if (len != pkt_size)
goto test_loopback_rx_exit;
