bnx2x: code beautify
This patch does not include any functional changes.
The changes are: empty lines, indentation and comments.

Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Dmitry Kravkov authored and David S. Miller committed Oct 6, 2010
1 parent c2bff63 commit f85582f
Showing 7 changed files with 318 additions and 233 deletions.
54 changes: 28 additions & 26 deletions drivers/net/bnx2x/bnx2x.h
@@ -180,13 +180,14 @@ void bnx2x_panic_dump(struct bnx2x *bp);
#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \
offsetof(struct mf_cfg, field))
#define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \
#define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \
offsetof(struct mf2_cfg, field))

#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field))
#define MF_CFG_WR(bp, field, val) REG_WR(bp,\
MF_CFG_ADDR(bp, field), (val))
#define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field))

#define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \
(SHMEM2_RD((bp), size) > \
offsetof(struct shmem2_region, field)))
@@ -310,7 +311,7 @@ struct bnx2x_fastpath {

#define BNX2X_NAPI_WEIGHT 128
struct napi_struct napi;
union host_hc_status_block status_blk;
union host_hc_status_block status_blk;
/* chip independed shortcuts into sb structure */
__le16 *sb_index_values;
__le16 *sb_running_index;
@@ -349,8 +350,8 @@ struct bnx2x_fastpath {
#define BNX2X_FP_STATE_TERMINATING 0xd0000
#define BNX2X_FP_STATE_TERMINATED 0xe0000

u8 index; /* number in fp array */
u8 cl_id; /* eth client id */
u8 index; /* number in fp array */
u8 cl_id; /* eth client id */
u8 cl_qzone_id;
u8 fw_sb_id; /* status block number in FW */
u8 igu_sb_id; /* status block number in HW */
@@ -375,8 +376,6 @@ struct bnx2x_fastpath {
u16 last_max_sge;
__le16 *rx_cons_sb;



unsigned long tx_pkt,
rx_pkt,
rx_calls;
@@ -977,7 +976,7 @@ struct bnx2x {
u32 mf2_config[E2_FUNC_MAX];
u16 mf_ov;
u8 mf_mode;
#define IS_MF(bp) (bp->mf_mode != 0)
#define IS_MF(bp) (bp->mf_mode != 0)

u8 wol;

@@ -1302,21 +1301,35 @@ struct bnx2x_func_init_params {
for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)


#define WAIT_RAMROD_POLL 0x01
#define WAIT_RAMROD_COMMON 0x02
int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
int *state_p, int flags);

/* dmae */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
u32 len32);
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len);
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
bool with_comp, u8 comp_type);

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len);

void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
u32 data_hi, u32 data_lo, int common);
void bnx2x_update_coalesce(struct bnx2x *bp);
int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
int wait)
{
@@ -1333,6 +1346,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,

return val;
}

#define BNX2X_ILT_ZALLOC(x, y, size) \
do { \
x = pci_alloc_consistent(bp->pdev, size, y); \
@@ -1353,6 +1367,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,

#define ILT_NUM_PAGE_ENTRIES (3072)
/* In 57710/11 we use whole table since we have 8 func
* In 57712 we have only 4 func, but use same size per func, then only half of
* the table in use
*/
#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8)

@@ -1366,14 +1382,13 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))


/* load/unload mode */
#define LOAD_NORMAL 0
#define LOAD_OPEN 1
#define LOAD_DIAG 2
#define UNLOAD_NORMAL 0
#define UNLOAD_CLOSE 1
#define UNLOAD_RECOVERY 2
#define UNLOAD_RECOVERY 2


/* DMAE command defines */
@@ -1447,7 +1462,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
E1HVN_MAX)


/* PCIE link and speed */
#define PCICFG_LINK_WIDTH 0x1f00000
#define PCICFG_LINK_WIDTH_SHIFT 20
@@ -1596,6 +1610,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_SP_DSB_INDEX \
(&bp->def_status_blk->sp_sb.\
index_values[HC_SP_INDEX_ETH_DEF_CONS])

#define SET_FLAG(value, mask, flag) \
do {\
(value) &= ~(mask);\
@@ -1630,6 +1645,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#ifndef ETH_MAX_RX_CLIENTS_E2
#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
#endif

#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4

@@ -1649,20 +1665,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,

BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */

/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */

extern void bnx2x_set_ethtool_ops(struct net_device *netdev);

void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
bool with_comp, u8 comp_type);


#define WAIT_RAMROD_POLL 0x01
#define WAIT_RAMROD_COMMON 0x02

int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
int *state_p, int flags);
#endif /* bnx2x.h */
60 changes: 33 additions & 27 deletions drivers/net/bnx2x/bnx2x_cmn.c
@@ -15,7 +15,6 @@
*
*/


#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <net/ipv6.h>
@@ -136,7 +135,6 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
*/
smp_mb();

/* TBD need a thresh? */
if (unlikely(netif_tx_queue_stopped(txq))) {
/* Taking tx_lock() is needed to prevent reenabling the queue
* while it's empty. This could have happen if rx_action() gets
@@ -623,6 +621,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
bnx2x_set_skb_rxhash(bp, cqe, skb);

skb_checksum_none_assert(skb);

if (bp->rx_csum) {
if (likely(BNX2X_RX_CSUM_OK(cqe)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -704,7 +703,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
return IRQ_HANDLED;
}


/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
@@ -916,6 +914,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
}
}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
int i;
@@ -1185,6 +1184,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
case ETH_RSS_MODE_REGULAR:
bp->num_queues = bnx2x_calc_num_queues(bp);
break;

default:
bp->num_queues = 1;
break;
@@ -1354,6 +1354,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Enable Timer scan */
REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif

for_each_nondefault_queue(bp, i) {
rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
if (rc)
@@ -1473,11 +1474,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)

/* Stop Tx */
bnx2x_tx_disable(bp);

del_timer_sync(&bp->timer);

SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
(DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bnx2x_stats_handle(bp, STATS_EVENT_STOP);

bnx2x_stats_handle(bp, STATS_EVENT_STOP);

/* Cleanup the chip if needed */
if (unload_mode != UNLOAD_RECOVERY)
@@ -1514,6 +1517,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)

return 0;
}

int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
u16 pmcsr;
@@ -1560,12 +1564,9 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
return 0;
}



/*
* net_device service functions
*/

int bnx2x_poll(struct napi_struct *napi, int budget)
{
int work_done = 0;
@@ -1595,19 +1596,19 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
/* Fall out from the NAPI loop if needed */
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
bnx2x_update_fpsb_idx(fp);
/* bnx2x_has_rx_work() reads the status block,
* thus we need to ensure that status block indices
* have been actually read (bnx2x_update_fpsb_idx)
* prior to this check (bnx2x_has_rx_work) so that
* we won't write the "newer" value of the status block
* to IGU (if there was a DMA right after
* bnx2x_has_rx_work and if there is no rmb, the memory
* reading (bnx2x_update_fpsb_idx) may be postponed
* to right before bnx2x_ack_sb). In this case there
* will never be another interrupt until there is
* another update of the status block, while there
* is still unhandled work.
*/
/* bnx2x_has_rx_work() reads the status block,
* thus we need to ensure that status block indices
* have been actually read (bnx2x_update_fpsb_idx)
* prior to this check (bnx2x_has_rx_work) so that
* we won't write the "newer" value of the status block
* to IGU (if there was a DMA right after
* bnx2x_has_rx_work and if there is no rmb, the memory
* reading (bnx2x_update_fpsb_idx) may be postponed
* to right before bnx2x_ack_sb). In this case there
* will never be another interrupt until there is
* another update of the status block, while there
* is still unhandled work.
*/
rmb();

if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
@@ -1626,7 +1627,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
return work_done;
}


/* we split the first BD into headers and data BDs
* to ease the pain of our fellow microcode engineers
* we use one mapping for both BDs
@@ -1842,6 +1842,7 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,

pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
}

/**
*
* @param skb
@@ -1914,6 +1915,7 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,

return hlen;
}

/* called with netif_tx_lock
* bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
* netif_wake_queue()
@@ -2003,13 +2005,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
SET_FLAG(tx_start_bd->general_data,
ETH_TX_START_BD_ETH_ADDR_TYPE,
mac_type);
SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
mac_type);

/* header nbd */
SET_FLAG(tx_start_bd->general_data,
ETH_TX_START_BD_HDR_NBDS,
1);
SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);

/* remember the first BD of the packet */
tx_buf->first_bd = fp->tx_bd_prod;
@@ -2065,9 +2065,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)

}

/* Map skb linear data for DMA */
mapping = dma_map_single(&bp->pdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);

/* Setup the data pointer of the first BD of the packet */
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
@@ -2101,6 +2103,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

/* Handle fragmented skb */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

@@ -2165,6 +2168,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)

fp->tx_db.data.prod += nbd;
barrier();

DOORBELL(bp, fp->cid, fp->tx_db.raw);

mmiowb();
@@ -2187,6 +2191,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)

return NETDEV_TX_OK;
}

/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
@@ -2319,6 +2324,7 @@ void bnx2x_vlan_rx_register(struct net_device *dev,
}

#endif

int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);