diff --git a/[refs] b/[refs]
index 7a1c9adad37e..14a3d30e0c62 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: fb04121417b32329f92a260b490da8434d704e3d
+refs/heads/master: 244e27ad4d9e561c688c4da3383fec890be832d0
diff --git a/trunk/drivers/net/ethernet/atheros/atlx/atl2.c b/trunk/drivers/net/ethernet/atheros/atlx/atl2.c
index 6762dc406b25..071f4c858969 100644
--- a/trunk/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/trunk/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -2258,7 +2258,7 @@ static int get_permanent_address(struct atl2_hw *hw)
 u32 Addr[2];
 u32 i, Control;
 u16 Register;
- u8 EthAddr[ETH_ALEN];
+ u8 EthAddr[NODE_ADDRESS_SIZE];
 bool KeyValid;

 if (is_valid_ether_addr(hw->perm_mac_addr))
@@ -2299,7 +2299,7 @@ static int get_permanent_address(struct atl2_hw *hw)
 *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);

 if (is_valid_ether_addr(EthAddr)) {
- memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN);
+ memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
 return 0;
 }
 return 1;
@@ -2334,7 +2334,7 @@ static int get_permanent_address(struct atl2_hw *hw)
 *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
 *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *)&Addr[1]);
 if (is_valid_ether_addr(EthAddr)) {
- memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN);
+ memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
 return 0;
 }
 /* maybe MAC-address is from BIOS */
@@ -2344,7 +2344,7 @@ static int get_permanent_address(struct atl2_hw *hw)
 *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);

 if (is_valid_ether_addr(EthAddr)) {
- memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN);
+ memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
 return 0;
 }

@@ -2358,6 +2358,8 @@ static int get_permanent_address(struct atl2_hw *hw)
 */
 static s32 atl2_read_mac_addr(struct atl2_hw *hw)
 {
+ u16 i;
+
 if (get_permanent_address(hw)) {
 /* for test */
 /* FIXME: shouldn't we use random_ether_addr() here?
*/ @@ -2369,7 +2371,8 @@ static s32 atl2_read_mac_addr(struct atl2_hw *hw) hw->perm_mac_addr[5] = 0x38; } - memcpy(hw->mac_addr, hw->perm_mac_addr, ETH_ALEN); + for (i = 0; i < NODE_ADDRESS_SIZE; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; return 0; } diff --git a/trunk/drivers/net/ethernet/atheros/atlx/atl2.h b/trunk/drivers/net/ethernet/atheros/atlx/atl2.h index 3ebe19f7242b..bf9016ebdd9b 100644 --- a/trunk/drivers/net/ethernet/atheros/atlx/atl2.h +++ b/trunk/drivers/net/ethernet/atheros/atlx/atl2.h @@ -47,6 +47,7 @@ extern int ethtool_ioctl(struct ifreq *ifr); #define PCI_COMMAND_REGISTER PCI_COMMAND #define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE +#define ETH_ADDR_LEN ETH_ALEN #define ATL2_WRITE_REG(a, reg, value) (iowrite32((value), \ ((a)->hw_addr + (reg)))) @@ -428,8 +429,8 @@ struct atl2_hw { u8 flash_vendor; u8 dma_fairness; - u8 mac_addr[ETH_ALEN]; - u8 perm_mac_addr[ETH_ALEN]; + u8 mac_addr[NODE_ADDRESS_SIZE]; + u8 perm_mac_addr[NODE_ADDRESS_SIZE]; /* FIXME */ /* bool phy_preamble_sup; */ diff --git a/trunk/drivers/net/ethernet/atheros/atlx/atlx.h b/trunk/drivers/net/ethernet/atheros/atlx/atlx.h index 448f5dcc02e6..14054b75aa62 100644 --- a/trunk/drivers/net/ethernet/atheros/atlx/atlx.h +++ b/trunk/drivers/net/ethernet/atheros/atlx/atlx.h @@ -484,6 +484,7 @@ /* For checksumming, the sum of all words in the EEPROM should equal 0xBABA */ #define EEPROM_SUM 0xBABA +#define NODE_ADDRESS_SIZE 6 struct atlx_spi_flash_dev { const char *manu_name; /* manufacturer id */ diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index e37161f19250..c0cf313e6519 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -58,22 +58,18 @@ #define DRV_MODULE_NAME "bnx2x" /* for messages that are currently off */ -#define BNX2X_MSG_OFF 0x0 -#define BNX2X_MSG_MCP 0x0010000 /* was: NETIF_MSG_HW */ -#define BNX2X_MSG_STATS 0x0020000 /* was: NETIF_MSG_TIMER */ -#define BNX2X_MSG_NVM 0x0040000 /* was: NETIF_MSG_HW */ -#define BNX2X_MSG_DMAE 0x0080000 /* was: NETIF_MSG_HW */ -#define BNX2X_MSG_SP 0x0100000 /* was: NETIF_MSG_INTR */ -#define BNX2X_MSG_FP 0x0200000 /* was: NETIF_MSG_INTR */ -#define BNX2X_MSG_IOV 0x0800000 -#define BNX2X_MSG_IDLE 0x2000000 /* used for idle check*/ -#define BNX2X_MSG_ETHTOOL 0x4000000 -#define BNX2X_MSG_DCB 0x8000000 +#define BNX2X_MSG_OFF 0 +#define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */ +#define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */ +#define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */ +#define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */ +#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */ +#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */ /* regular debug print */ #define DP(__mask, fmt, ...) \ do { \ - if (unlikely(bp->msg_enable & (__mask))) \ + if (bp->msg_enable & (__mask)) \ pr_notice("[%s:%d(%s)]" fmt, \ __func__, __LINE__, \ bp->dev ? (bp->dev->name) : "?", \ @@ -82,14 +78,14 @@ do { \ #define DP_CONT(__mask, fmt, ...) \ do { \ - if (unlikely(bp->msg_enable & (__mask))) \ + if (bp->msg_enable & (__mask)) \ pr_cont(fmt, ##__VA_ARGS__); \ } while (0) /* errors debug print */ #define BNX2X_DBG_ERR(fmt, ...) \ do { \ - if (unlikely(netif_msg_probe(bp))) \ + if (netif_msg_probe(bp)) \ pr_err("[%s:%d(%s)]" fmt, \ __func__, __LINE__, \ bp->dev ? (bp->dev->name) : "?", \ @@ -112,7 +108,7 @@ do { \ /* before we have a dev->name use dev_info() */ #define BNX2X_DEV_INFO(fmt, ...) 
\ do { \ - if (unlikely(netif_msg_probe(bp))) \ + if (netif_msg_probe(bp)) \ dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \ } while (0) @@ -1482,7 +1478,6 @@ struct bnx2x { u16 stats_counter; struct bnx2x_eth_stats eth_stats; - struct host_func_stats func_stats; struct bnx2x_eth_stats_old eth_stats_old; struct bnx2x_net_stats_old net_stats_old; struct bnx2x_fw_port_stats_old fw_stats_old; @@ -2118,22 +2113,14 @@ void bnx2x_set_ethtool_ops(struct net_device *netdev); void bnx2x_notify_link_changed(struct bnx2x *bp); -#define BNX2X_MF_SD_PROTOCOL(bp) \ +#define BNX2X_MF_PROTOCOL(bp) \ ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK) #ifdef BCM_CNIC -#define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \ - (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI) - -#define BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) \ - (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_FCOE) - -#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) -#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) +#define BNX2X_IS_MF_PROTOCOL_ISCSI(bp) \ + (BNX2X_MF_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI) -#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \ - (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ - BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) +#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) #endif #endif /* bnx2x.h */ diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index f1f3ca65667a..b814f4eaed19 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -75,10 +75,11 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, /* prefetch skb end pointer to speedup dev_kfree_skb() */ prefetch(&skb->end); - DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", + DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", txdata->txq_index, idx, tx_buf, skb); /* unmap first bd */ + DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); @@ -109,6 +110,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, /* now free frags */ while (nbd > 0) { + DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); @@ -118,11 +120,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, /* release skb */ WARN_ON(!skb); - if (likely(skb)) { + if (skb) { (*pkts_compl)++; (*bytes_compl) += skb->len; } - dev_kfree_skb_any(skb); tx_buf->first_bd = 0; tx_buf->skb = NULL; @@ -150,8 +151,8 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) pkt_cons = TX_BD(sw_cons); - DP(NETIF_MSG_TX_DONE, - "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n", + DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u " + " pkt_cons %u\n", txdata->txq_index, hw_cons, sw_cons, pkt_cons); bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons, @@ -529,7 +530,8 @@ static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, if (likely(skb)) { #ifdef BNX2X_STOP_ON_ERROR if (pad + len > fp->rx_buf_size) { - BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n", + BNX2X_ERR("skb_put is about to fail... 
" + "pad %d len %d rx_buf_size %d\n", pad, len, fp->rx_buf_size); bnx2x_panic(); return; @@ -549,8 +551,8 @@ static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag); napi_gro_receive(&fp->napi, skb); } else { - DP(NETIF_MSG_RX_STATUS, - "Failed to allocate new pages - dropping packet!\n"); + DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" + " - dropping packet!\n"); dev_kfree_skb_any(skb); } @@ -626,9 +628,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) cqe_fp_flags = cqe_fp->type_error_flags; cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; - DP(NETIF_MSG_RX_STATUS, - "CQE type %x err %x status %x queue %x vlan %x len %u\n", - CQE_TYPE(cqe_fp_flags), + DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" + " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), cqe_fp_flags, cqe_fp->status_flags, le32_to_cpu(cqe_fp->rss_hash_result), le16_to_cpu(cqe_fp->vlan_tag), @@ -651,7 +652,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) if (fp->disable_tpa && (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) - BNX2X_ERR("START/STOP packet while disable_tpa type %x\n", + BNX2X_ERR("START/STOP packet while " + "disable_tpa type %x\n", CQE_TYPE(cqe_fp_type)); #endif @@ -705,7 +707,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) prefetch(data + pad); /* speedup eth_type_trans() */ /* is this an error packet? */ if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { - DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, + DP(NETIF_MSG_RX_ERR, "ERROR flags %x rx packet %u\n", cqe_fp_flags, sw_comp_cons); fp->eth_q_stats.rx_err_discard_pkt++; @@ -719,7 +721,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) (len <= RX_COPY_THRESH)) { skb = netdev_alloc_skb_ip_align(bp->dev, len); if (skb == NULL) { - DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, + DP(NETIF_MSG_RX_ERR, "ERROR packet dropped because of alloc failure\n"); fp->eth_q_stats.rx_skb_alloc_failed++; goto reuse_rx; @@ -740,8 +742,9 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) } skb_reserve(skb, pad); } else { - DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, - "ERROR packet dropped because of alloc failure\n"); + DP(NETIF_MSG_RX_ERR, + "ERROR packet dropped because " + "of alloc failure\n"); fp->eth_q_stats.rx_skb_alloc_failed++; reuse_rx: bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); @@ -810,8 +813,8 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) struct bnx2x *bp = fp->bp; u8 cos; - DP(NETIF_MSG_INTR, - "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n", + DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB " + "[fp %d fw_sd %d igusb %d]\n", fp->index, fp->fw_sb_id, fp->igu_sb_id); bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); @@ -1024,8 +1027,10 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC); if (!first_buf->data) { - BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n", - j); + BNX2X_ERR("Failed to allocate TPA " + "skb pool for queue[%d] - " + "disabling TPA on this " + "queue!\n", j); bnx2x_free_tpa_pool(bp, fp, i); fp->disable_tpa = 1; break; @@ -1045,10 +1050,10 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) { if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { - BNX2X_ERR("was only able to allocate %d rx sges\n", - i); - BNX2X_ERR("disabling TPA for queue[%d]\n", - j); + BNX2X_ERR("was only able to allocate " + "%d rx 
sges\n", i); + BNX2X_ERR("disabling TPA for " + "queue[%d]\n", j); /* Cleanup already allocated elements */ bnx2x_free_rx_sge_range(bp, fp, ring_prod); @@ -1203,8 +1208,8 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) for_each_eth_queue(bp, i) { if (nvecs == offset) return; - DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n", - i, bp->msix_table[offset].vector); + DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d " + "irq\n", i, bp->msix_table[offset].vector); free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); } @@ -1226,21 +1231,21 @@ int bnx2x_enable_msix(struct bnx2x *bp) int msix_vec = 0, i, rc, req_cnt; bp->msix_table[msix_vec].entry = msix_vec; - BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n", + DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", bp->msix_table[0].entry); msix_vec++; #ifdef BCM_CNIC bp->msix_table[msix_vec].entry = msix_vec; - BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n", + DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n", bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); msix_vec++; #endif /* We need separate vectors for ETH queues only (not FCoE) */ for_each_eth_queue(bp, i) { bp->msix_table[msix_vec].entry = msix_vec; - BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n", - msix_vec, msix_vec, i); + DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " + "(fastpath #%u)\n", msix_vec, msix_vec, i); msix_vec++; } @@ -1256,12 +1261,14 @@ int bnx2x_enable_msix(struct bnx2x *bp) /* how less vectors we will have? */ int diff = req_cnt - rc; - BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc); + DP(NETIF_MSG_IFUP, + "Trying to use less MSI-X vectors: %d\n", rc); rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); if (rc) { - BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); + DP(NETIF_MSG_IFUP, + "MSI-X is not attainable rc %d\n", rc); return rc; } /* @@ -1269,13 +1276,13 @@ int bnx2x_enable_msix(struct bnx2x *bp) */ bp->num_queues -= diff; - BNX2X_DEV_INFO("New queue configuration set: %d\n", + DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n", bp->num_queues); } else if (rc) { /* fall to INTx if not enough memory */ if (rc == -ENOMEM) bp->flags |= DISABLE_MSI_FLAG; - BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); + DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); return rc; } @@ -1318,7 +1325,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp) i = BNX2X_NUM_ETH_QUEUES(bp); offset = 1 + CNIC_PRESENT; - netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n", + netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" + " ... 
fp[%d] %d\n", bp->msix_table[0].vector, 0, bp->msix_table[offset].vector, i - 1, bp->msix_table[offset + i - 1].vector); @@ -1332,7 +1340,7 @@ int bnx2x_enable_msi(struct bnx2x *bp) rc = pci_enable_msi(bp->pdev); if (rc) { - BNX2X_DEV_INFO("MSI is not attainable\n"); + DP(NETIF_MSG_IFUP, "MSI is not attainable\n"); return -1; } bp->flags |= USING_MSI_FLAG; @@ -1453,8 +1461,8 @@ void bnx2x_set_num_queues(struct bnx2x *bp) } #ifdef BCM_CNIC - /* override in STORAGE SD mode */ - if (IS_MF_STORAGE_SD(bp)) + /* override in ISCSI SD mod */ + if (IS_MF_ISCSI_SD(bp)) bp->num_queues = 1; #endif /* Add special queues */ @@ -1509,7 +1517,7 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) return rc; } - DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n", + DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n", tx, rx); return rc; @@ -1574,7 +1582,7 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp) int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) { - struct bnx2x_config_rss_params params = {NULL}; + struct bnx2x_config_rss_params params = {0}; int i; /* Although RSS is meaningless when there is a single HW queue we @@ -1637,7 +1645,7 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) { - struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_state_params func_params = {0}; /* Prepare parameters for function state transitions */ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); @@ -1658,7 +1666,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp) { int rc; unsigned long ramrod_flags = 0, vlan_mac_flags = 0; - struct bnx2x_mcast_ramrod_params rparam = {NULL}; + struct bnx2x_mcast_ramrod_params rparam = {0}; struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; /***************** Cleanup MACs' object first *************************/ @@ -1690,8 +1698,8 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp) /* Add a DEL command... */ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); if (rc < 0) - BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n", - rc); + BNX2X_ERR("Failed to add a new DEL command to a multi-cast " + "object: %d\n", rc); /* ...and wait until all pending commands are cleared */ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); @@ -1729,10 +1737,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) int i, rc; #ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) { - BNX2X_ERR("Can't load NIC when there is panic\n"); + if (unlikely(bp->panic)) return -EPERM; - } #endif bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; @@ -1752,7 +1758,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) * allocated only once, fp index, max_cos, bp pointer. * Also set fp->disable_tpa. 
*/ - DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); for_each_queue(bp, i) bnx2x_bz_fp(bp, i); @@ -1809,7 +1814,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) LOAD_ERROR_EXIT(bp, load_error1); } if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { - BNX2X_ERR("Driver load refused\n"); rc = -EBUSY; /* other port in diagnostic mode */ LOAD_ERROR_EXIT(bp, load_error1); } @@ -1870,7 +1874,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) } else bp->port.pmf = 0; - DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf); + DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); /* Init Function state controlling object */ bnx2x__init_func_obj(bp); @@ -1886,7 +1890,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* Connect to IRQs */ rc = bnx2x_setup_irqs(bp); if (rc) { - BNX2X_ERR("IRQs setup failed\n"); bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); LOAD_ERROR_EXIT(bp, load_error2); } @@ -1937,27 +1940,21 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) for_each_nondefault_queue(bp, i) { rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); - if (rc) { - BNX2X_ERR("Queue setup failed\n"); + if (rc) LOAD_ERROR_EXIT(bp, load_error4); - } } rc = bnx2x_init_rss_pf(bp); - if (rc) { - BNX2X_ERR("PF RSS init failed\n"); + if (rc) LOAD_ERROR_EXIT(bp, load_error4); - } /* Now when Clients are configured we are ready to work */ bp->state = BNX2X_STATE_OPEN; /* Configure a ucast MAC */ rc = bnx2x_set_eth_mac(bp, true); - if (rc) { - BNX2X_ERR("Setting Ethernet MAC failed\n"); + if (rc) LOAD_ERROR_EXIT(bp, load_error4); - } if (bp->pending_max) { bnx2x_update_max_mf_config(bp, bp->pending_max); @@ -2094,8 +2091,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) bnx2x_release_leader_lock(bp); smp_mb(); - DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n"); - BNX2X_ERR("Can't unload in closed or error state\n"); + DP(NETIF_MSG_HW, "Releasing a leadership...\n"); + return -EINVAL; } @@ -2200,7 +2197,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) /* If there is no power capability, silently succeed */ if (!bp->pm_cap) { - BNX2X_DEV_INFO("No power capability. Breaking.\n"); + DP(NETIF_MSG_HW, "No power capability. Breaking.\n"); return 0; } @@ -2241,7 +2238,6 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) break; default: - dev_err(&bp->pdev->dev, "Can't support state = %d\n", state); return -EINVAL; } return 0; @@ -2311,7 +2307,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget) if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { napi_complete(napi); /* Re-enable interrupts */ - DP(NETIF_MSG_RX_STATUS, + DP(NETIF_MSG_HW, "Update index to %d\n", fp->fp_hc_idx); bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, le16_to_cpu(fp->fp_hc_idx), @@ -2345,8 +2341,9 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp, h_tx_bd->nbd = cpu_to_le16(nbd); h_tx_bd->nbytes = cpu_to_le16(hlen); - DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n", - h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd); + DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d " + "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi, + h_tx_bd->addr_lo, h_tx_bd->nbd); /* now get a new data BD * (after the pbd) and fill it */ @@ -2486,7 +2483,8 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, exit_lbl: if (unlikely(to_copy)) DP(NETIF_MSG_TX_QUEUED, - "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n", + "Linearization IS REQUIRED for %s packet. 
" + "num_frags %d hlen %d first_bd_sz %d\n", (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); @@ -2694,7 +2692,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) #endif /* enable this debug print to view the transmission queue being used - DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", + DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n", txq_index, fp_index, txdata_index); */ /* locate the fastpath and the txdata */ @@ -2702,8 +2700,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) txdata = &fp->txdata[txdata_index]; /* enable this debug print to view the tranmission details - DP(NETIF_MSG_TX_QUEUED, - "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", + DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d" + " tx_data ptr %p fp pointer %p\n", txdata->cid, fp_index, txdata_index, txdata, fp); */ if (unlikely(bnx2x_tx_avail(bp, txdata) < @@ -2714,8 +2712,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - DP(NETIF_MSG_TX_QUEUED, - "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n", + DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x " + "protocol(%x,%x) gso type %x xmit_type %x\n", txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); @@ -2737,8 +2735,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Statistics of linearization */ bp->lin_cnt++; if (skb_linearize(skb) != 0) { - DP(NETIF_MSG_TX_QUEUED, - "SKB linearization failed - silently dropping this SKB\n"); + DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " + "silently dropping this SKB\n"); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -2748,8 +2746,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) mapping = dma_map_single(&bp->pdev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - DP(NETIF_MSG_TX_QUEUED, - "SKB mapping failed - silently dropping this SKB\n"); + DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - " + "silently dropping this SKB\n"); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -2844,8 +2842,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); pkt_size = tx_start_bd->nbytes; - DP(NETIF_MSG_TX_QUEUED, - "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n", + DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d" + " nbytes %d flags %x vlan %x\n", tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), tx_start_bd->bd_flags.as_bitfield, @@ -2888,8 +2886,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { unsigned int pkts_compl = 0, bytes_compl = 0; - DP(NETIF_MSG_TX_QUEUED, - "Unable to map page - dropping packet...\n"); + DP(NETIF_MSG_TX_QUEUED, "Unable to map page - " + "dropping packet...\n"); /* we need unmap all buffers already mapped * for this SKB; @@ -2945,7 +2943,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) if (pbd_e1x) DP(NETIF_MSG_TX_QUEUED, - "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n", + "PBD (E1X) @%p 
ip_data %x ip_hlen %u ip_id %u lso_mss %u" + " tcp_flags %x xsum %x seq %u hlen %u\n", pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, @@ -3021,22 +3020,23 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) /* requested to support too many traffic classes */ if (num_tc > bp->max_cos) { - BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n", - num_tc, bp->max_cos); + DP(NETIF_MSG_TX_ERR, "support for too many traffic classes" + " requested: %d. max supported is %d\n", + num_tc, bp->max_cos); return -EINVAL; } /* declare amount of supported traffic classes */ if (netdev_set_num_tc(dev, num_tc)) { - BNX2X_ERR("failed to declare %d traffic classes\n", num_tc); + DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n", + num_tc); return -EINVAL; } /* configure priority to traffic class mapping */ for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]); - DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, - "mapping priority %d to tc %d\n", + DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, bp->prio_to_cos[prio]); } @@ -3056,8 +3056,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) count = BNX2X_NUM_ETH_QUEUES(bp); offset = cos * MAX_TXQS_PER_COS; netdev_set_tc_queue(dev, cos, count, offset); - DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, - "mapping tc %d to offset %d count %d\n", + DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n", cos, offset, count); } @@ -3071,16 +3070,12 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p) struct bnx2x *bp = netdev_priv(dev); int rc = 0; - if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) { - BNX2X_ERR("Requested MAC address is not valid\n"); + if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) return -EINVAL; - } #ifdef BCM_CNIC - if (IS_MF_STORAGE_SD(bp) && !is_zero_ether_addr(addr->sa_data)) { - BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n"); + if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data)) return -EINVAL; - } #endif if (netif_running(dev)) { @@ -3154,7 +3149,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) for_each_cos_in_tx_queue(fp, cos) { struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; - DP(NETIF_MSG_IFDOWN, + DP(BNX2X_MSG_SP, "freeing tx memory of fp %d cos %d cid %d\n", fp_index, cos, txdata->cid); @@ -3199,7 +3194,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) int rx_ring_size = 0; #ifdef BCM_CNIC - if (!bp->rx_ring_size && IS_MF_STORAGE_SD(bp)) { + if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) { rx_ring_size = MIN_RX_SIZE_NONTPA; bp->rx_ring_size = rx_ring_size; } else @@ -3253,8 +3248,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) for_each_cos_in_tx_queue(fp, cos) { struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; - DP(NETIF_MSG_IFUP, - "allocating tx memory of fp %d cos %d\n", + DP(BNX2X_MSG_SP, "allocating tx memory of " + "fp %d cos %d\n", index, cos); BNX2X_ALLOC(txdata->tx_buf_ring, @@ -3491,7 +3486,6 @@ int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) cp->fcoe_wwn_port_name_lo); break; default: - BNX2X_ERR("Wrong WWN type requested - %d\n", type); return -EINVAL; } @@ -3505,15 +3499,13 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu) struct bnx2x *bp = netdev_priv(dev); if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - BNX2X_ERR("Can't perform change MTU during parity recovery\n"); + 
netdev_err(dev, "Handling parity error recovery. Try again later\n"); return -EAGAIN; } if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || - ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) { - BNX2X_ERR("Can't support requested MTU size\n"); + ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) return -EINVAL; - } /* This does not race with packet allocation * because the actual alloc size is @@ -3643,7 +3635,7 @@ int bnx2x_resume(struct pci_dev *pdev) bp = netdev_priv(dev); if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - BNX2X_ERR("Handling parity error recovery. Try again later\n"); + netdev_err(dev, "Handling parity error recovery. Try again later\n"); return -EAGAIN; } @@ -3688,9 +3680,8 @@ static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); REG_WR8(bp, addr, ticks); - DP(NETIF_MSG_IFUP, - "port %x fw_sb_id %d sb_index %d ticks %d\n", - port, fw_sb_id, sb_index, ticks); + DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n", + port, fw_sb_id, sb_index, ticks); } static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, @@ -3705,9 +3696,8 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, flags &= ~HC_INDEX_DATA_HC_ENABLED; flags |= enable_flag; REG_WR16(bp, addr, flags); - DP(NETIF_MSG_IFUP, - "port %x fw_sb_id %d sb_index %d disable %d\n", - port, fw_sb_id, sb_index, disable); + DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n", + port, fw_sb_id, sb_index, disable); } void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 8b163388659a..5904b1b1dad4 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -598,7 +598,7 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, (update << IGU_REGULAR_BUPDATE_SHIFT) | (op << IGU_REGULAR_ENABLE_INT_SHIFT)); - DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n", + DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n", cmd_data.sb_id_and_flags, igu_addr); REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags); @@ -648,8 +648,8 @@ static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { - DP(NETIF_MSG_HW, - "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n", + DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: " + "idu_sb_id %d offset %d bit %d (cnt %d)\n", idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); } } @@ -668,6 +668,8 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); + DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n", + (*(u32 *)&igu_ack), hc_addr); REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); /* Make sure that ACK is written */ @@ -701,6 +703,9 @@ static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp) COMMAND_REG_SIMD_MASK); u32 result = REG_RD(bp, hc_addr); + DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", + result, hc_addr); + barrier(); return result; } @@ -710,7 +715,7 @@ static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp) u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8); u32 result = REG_RD(bp, igu_addr); - DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n", + DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n", result, igu_addr); barrier(); @@ 
-888,16 +893,13 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; dma_addr_t mapping; - if (unlikely(page == NULL)) { - BNX2X_ERR("Can't alloc sge\n"); + if (unlikely(page == NULL)) return -ENOMEM; - } mapping = dma_map_page(&bp->pdev->dev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { __free_pages(page, PAGES_PER_SGE_SHIFT); - BNX2X_ERR("Can't map sge\n"); return -ENOMEM; } @@ -927,7 +929,6 @@ static inline int bnx2x_alloc_rx_data(struct bnx2x *bp, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { kfree(data); - BNX2X_ERR("Can't map rx data\n"); return -ENOMEM; } @@ -970,7 +971,7 @@ static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp, */ static inline int bnx2x_func_start(struct bnx2x *bp) { - struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_state_params func_params = {0}; struct bnx2x_func_start_params *start_params = &func_params.params.start; @@ -1298,7 +1299,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp, txdata->txq_index = txq_index; txdata->tx_cons_sb = tx_cons_sb; - DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n", + DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d\n", txdata->cid, txdata->txq_index); } @@ -1343,7 +1344,7 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]), fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX); - DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); + DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)\n", fp->index); /* qZone id equals to FW (per path) client id */ bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); @@ -1362,8 +1363,8 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) BP_FUNC(bp), bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata), q_type); - DP(NETIF_MSG_IFUP, - "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", + DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d " + "igu_sb %d\n", fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id); } @@ -1376,7 +1377,8 @@ static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, while (bnx2x_has_tx_work_unload(txdata)) { if (!cnt) { - BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n", + BNX2X_ERR("timeout waiting for queue[%d]: " + "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n", txdata->txq_index, txdata->tx_pkt_prod, txdata->tx_pkt_cons); #ifdef BNX2X_STOP_ON_ERROR @@ -1453,8 +1455,8 @@ static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask) netif_addr_lock_bh(bp->dev); if (bp->sp_state & mask) { - BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n", - bp->sp_state, mask); + BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, " + "mask 0x%lx\n", bp->sp_state, mask); netif_addr_unlock_bh(bp->dev); return false; } @@ -1490,7 +1492,7 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT; if (!max_cfg) { - DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL, + DP(NETIF_MSG_LINK, "Max BW configured to 0 - using 100 instead\n"); max_cfg = 100; } @@ -1596,7 +1598,6 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) #endif } -#ifdef BCM_CNIC /** * bnx2x_get_iscsi_info - update iSCSI params according to licensing info. 
* @@ -1604,7 +1605,7 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) * */ void bnx2x_get_iscsi_info(struct bnx2x *bp); -#endif + /* returns func by VN for current port */ static inline int func_by_vn(struct bnx2x *bp, int vn) { @@ -1654,7 +1655,7 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) RESET_FLAGS(drv_flags, flags); SHMEM2_WR(bp, drv_flags, drv_flags); - DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags); + DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags); bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS); } } @@ -1664,7 +1665,7 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr) if (is_valid_ether_addr(addr)) return true; #ifdef BCM_CNIC - if (is_zero_ether_addr(addr) && IS_MF_STORAGE_SD(bp)) + if (is_zero_ether_addr(addr) && IS_MF_ISCSI_SD(bp)) return true; #endif return false; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 4f9244bd7530..4446a42e8bdc 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -121,6 +121,26 @@ static void bnx2x_pfc_clear(struct bnx2x *bp) { struct bnx2x_nig_brb_pfc_port_params nig_params = {0}; nig_params.pause_enable = 1; +#ifdef BNX2X_SAFC + if (bp->flags & SAFC_TX_FLAG) { + u32 high = 0, low = 0; + int i; + + for (i = 0; i < BNX2X_MAX_PRIORITY; i++) { + if (bp->pri_map[i] == 1) + high |= (1 << i); + if (bp->pri_map[i] == 0) + low |= (1 << i); + } + + nig_params.llfc_low_priority_classes = high; + nig_params.llfc_low_priority_classes = low; + + nig_params.pause_enable = 0; + nig_params.llfc_enable = 1; + nig_params.llfc_out_en = 1; + } +#endif /* BNX2X_SAFC */ bnx2x_acquire_phy_lock(bp); bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED; bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params); @@ -147,27 +167,27 @@ static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp, DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i)); /* pfc */ - DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pri_en_bitmap %x\n", + DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n", features->pfc.pri_en_bitmap); - DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pfc_caps %x\n", + DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n", features->pfc.pfc_caps); - DP(BNX2X_MSG_DCB, "dcbx_features.pfc.enabled %x\n", + DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n", features->pfc.enabled); - DP(BNX2X_MSG_DCB, "dcbx_features.app.default_pri %x\n", + DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n", features->app.default_pri); - DP(BNX2X_MSG_DCB, "dcbx_features.app.tc_supported %x\n", + DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n", features->app.tc_supported); - DP(BNX2X_MSG_DCB, "dcbx_features.app.enabled %x\n", + DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n", features->app.enabled); for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { - DP(BNX2X_MSG_DCB, + DP(NETIF_MSG_LINK, "dcbx_features.app.app_pri_tbl[%x].app_id %x\n", i, features->app.app_pri_tbl[i].app_id); - DP(BNX2X_MSG_DCB, + DP(NETIF_MSG_LINK, "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n", i, features->app.app_pri_tbl[i].pri_bitmap); - DP(BNX2X_MSG_DCB, + DP(NETIF_MSG_LINK, "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n", i, features->app.app_pri_tbl[i].appBitfield); } @@ -201,16 +221,13 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR)) - 
DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_ERROR\n"); + DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n"); if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH)) - DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_MISMATCH\n"); + DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_MISMATCH\n"); - if (GET_FLAGS(error, DCBX_REMOTE_APP_TLV_NOT_FOUND)) - DP(BNX2X_MSG_DCB, "DCBX_REMOTE_APP_TLV_NOT_FOUND\n"); if (app->enabled && - !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH | - DCBX_REMOTE_APP_TLV_NOT_FOUND)) { + !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) { bp->dcbx_port_params.app.enabled = true; @@ -239,7 +256,7 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, LLFC_TRAFFIC_TYPE_ISCSI); } } else { - DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_DISABLED\n"); + DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n"); bp->dcbx_port_params.app.enabled = false; for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++) ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY; @@ -259,10 +276,8 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR)) - DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ERROR\n"); + DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n"); - if (GET_FLAGS(error, DCBX_REMOTE_ETS_TLV_NOT_FOUND)) - DP(BNX2X_MSG_DCB, "DCBX_REMOTE_ETS_TLV_NOT_FOUND\n"); /* Clean up old settings of ets on COS */ for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) { @@ -272,10 +287,10 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, cos_params[i].pri_bitmask = 0; } - if (bp->dcbx_port_params.app.enabled && ets->enabled && - !GET_FLAGS(error, - DCBX_LOCAL_ETS_ERROR | DCBX_REMOTE_ETS_TLV_NOT_FOUND)) { - DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ENABLE\n"); + if (bp->dcbx_port_params.app.enabled && + !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) && + ets->enabled) { + DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n"); bp->dcbx_port_params.ets.enabled = true; bnx2x_dcbx_get_ets_pri_pg_tbl(bp, @@ -290,7 +305,7 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, ets, pg_pri_orginal_spread); } else { - DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_DISABLED\n"); + DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n"); bp->dcbx_port_params.ets.enabled = false; ets->pri_pg_tbl[0] = 0; @@ -304,18 +319,16 @@ static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp, { if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR)) - DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_ERROR\n"); + DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n"); - if (GET_FLAGS(error, DCBX_REMOTE_PFC_TLV_NOT_FOUND)) - DP(BNX2X_MSG_DCB, "DCBX_REMOTE_PFC_TLV_NOT_FOUND\n"); - if (bp->dcbx_port_params.app.enabled && pfc->enabled && - !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH | - DCBX_REMOTE_PFC_TLV_NOT_FOUND)) { + if (bp->dcbx_port_params.app.enabled && + !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH) && + pfc->enabled) { bp->dcbx_port_params.pfc.enabled = true; bp->dcbx_port_params.pfc.priority_non_pauseable_mask = ~(pfc->pri_en_bitmap); } else { - DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_DISABLED\n"); + DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n"); bp->dcbx_port_params.pfc.enabled = false; bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0; } @@ -339,7 +352,7 @@ static void bnx2x_dcbx_map_nw(struct bnx2x *bp) for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) { if (cos_params[i].pri_bitmask & nw_prio) { /* extend the bitmask with unmapped */ - DP(BNX2X_MSG_DCB, + DP(NETIF_MSG_LINK, "cos %d extended with 0x%08x\n", i, unmapped); cos_params[i].pri_bitmask |= unmapped; break; @@ -430,18 +443,18 @@ static void 
bnx2x_pfc_set_pfc(struct bnx2x *bp) static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) { - struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_state_params func_params = {0}; func_params.f_obj = &bp->func_obj; func_params.cmd = BNX2X_F_CMD_TX_STOP; - DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n"); + DP(NETIF_MSG_LINK, "STOP TRAFFIC\n"); return bnx2x_func_state_change(bp, &func_params); } static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) { - struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_state_params func_params = {0}; struct bnx2x_func_tx_start_params *tx_params = &func_params.params.tx_start; @@ -450,7 +463,7 @@ static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) bnx2x_dcbx_fw_struct(bp, tx_params); - DP(BNX2X_MSG_DCB, "START TRAFFIC\n"); + DP(NETIF_MSG_LINK, "START TRAFFIC\n"); return bnx2x_func_state_change(bp, &func_params); } @@ -516,7 +529,7 @@ static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) /* * In E3B0 the configuration may have more than 2 COS. */ -static void bnx2x_dcbx_update_ets_config(struct bnx2x *bp) +void bnx2x_dcbx_update_ets_config(struct bnx2x *bp) { struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); struct bnx2x_ets_params ets_params = { 0 }; @@ -575,7 +588,7 @@ static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp) u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset); int rc; - DP(BNX2X_MSG_DCB, "dcbx_remote_mib_offset 0x%x\n", + DP(NETIF_MSG_LINK, "dcbx_remote_mib_offset 0x%x\n", dcbx_remote_mib_offset); if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) { @@ -604,7 +617,7 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp) u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset); int rc; - DP(BNX2X_MSG_DCB, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset); + DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset); if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) { BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n"); @@ -680,7 +693,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask & (1 << prio)) { bp->prio_to_cos[prio] = cos; - DP(BNX2X_MSG_DCB, + DP(NETIF_MSG_LINK, "tx_mapping %d --> %d\n", prio, cos); } } @@ -699,7 +712,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) switch (state) { case BNX2X_DCBX_STATE_NEG_RECEIVED: { - DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); + DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); #ifdef BCM_DCBNL /** * Delete app tlvs from dcbnl before reading new @@ -749,7 +762,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) return; } case BNX2X_DCBX_STATE_TX_PAUSED: - DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_PAUSED\n"); + DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n"); bnx2x_pfc_set_pfc(bp); bnx2x_dcbx_update_ets_params(bp); @@ -757,7 +770,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) return; case BNX2X_DCBX_STATE_TX_RELEASED: - DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n"); + DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n"); bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0); #ifdef BCM_DCBNL /* @@ -848,7 +861,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i, (u8)dp->admin_configuration_bw_precentage[i]); - DP(BNX2X_MSG_DCB, "pg_bw_tbl[%d] = %02x\n", + DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n", i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i)); } @@ -856,7 +869,7 @@ static void 
bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i, (u8)dp->admin_configuration_ets_pg[i]); - DP(BNX2X_MSG_DCB, "pri_pg_tbl[%d] = %02x\n", + DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n", i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i)); } @@ -910,7 +923,7 @@ void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) bp->dcb_state = false; bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID; } - DP(BNX2X_MSG_DCB, "DCB state [%s:%s]\n", + DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n", dcb_on ? "ON" : "OFF", dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" : dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" : @@ -932,30 +945,30 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp) bp->dcbx_config_params.admin_application_priority_tx_enable = 1; bp->dcbx_config_params.admin_ets_reco_valid = 1; bp->dcbx_config_params.admin_app_priority_willing = 1; - bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 100; - bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 0; - bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 00; + bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50; + bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50; bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0; bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0; bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0; bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0; bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0; - bp->dcbx_config_params.admin_configuration_ets_pg[0] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1; bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0; bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0; - bp->dcbx_config_params.admin_configuration_ets_pg[3] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2; bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0; bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0; bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0; bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0; - bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 100; - bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 0; - bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1; + bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2; bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0; - bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 0; - bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 0; - bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 0; - bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7; + bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5; + bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6; + bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7; bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0; bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1; bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2; @@ -964,12 +977,25 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp) bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5; 
bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6; bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7; - bp->dcbx_config_params.admin_pfc_bitmap = 0x0; - bp->dcbx_config_params.admin_priority_app_table[0].valid = 0; - bp->dcbx_config_params.admin_priority_app_table[1].valid = 0; + bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */ + bp->dcbx_config_params.admin_priority_app_table[0].valid = 1; + bp->dcbx_config_params.admin_priority_app_table[1].valid = 1; bp->dcbx_config_params.admin_priority_app_table[2].valid = 0; bp->dcbx_config_params.admin_priority_app_table[3].valid = 0; - bp->dcbx_config_params.admin_default_priority = 0; + bp->dcbx_config_params.admin_priority_app_table[0].priority = 3; + bp->dcbx_config_params.admin_priority_app_table[1].priority = 0; + bp->dcbx_config_params.admin_priority_app_table[2].priority = 0; + bp->dcbx_config_params.admin_priority_app_table[3].priority = 0; + bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0; + bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1; + bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0; + bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0; + bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906; + bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260; + bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0; + bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0; + bp->dcbx_config_params.admin_default_priority = + bp->dcbx_config_params.admin_priority_app_table[1].priority; } void bnx2x_dcbx_init(struct bnx2x *bp) @@ -985,7 +1011,7 @@ void bnx2x_dcbx_init(struct bnx2x *bp) * the function is pmf * shmem2 contains DCBX support fields */ - DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n", + DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n", bp->dcb_state, bp->port.pmf); if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf && @@ -993,7 +1019,7 @@ void bnx2x_dcbx_init(struct bnx2x *bp) dcbx_lldp_params_offset = SHMEM2_RD(bp, dcbx_lldp_params_offset); - DP(BNX2X_MSG_DCB, "dcbx_lldp_params_offset 0x%x\n", + DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n", dcbx_lldp_params_offset); bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0); @@ -1015,36 +1041,38 @@ bnx2x_dcbx_print_cos_params(struct bnx2x *bp, u8 pri = 0; u8 cos = 0; - DP(BNX2X_MSG_DCB, + DP(NETIF_MSG_LINK, "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version); - DP(BNX2X_MSG_DCB, - "pdev->params.dcbx_port_params.pfc.priority_non_pauseable_mask %x\n", + DP(NETIF_MSG_LINK, + "pdev->params.dcbx_port_params.pfc." + "priority_non_pauseable_mask %x\n", bp->dcbx_port_params.pfc.priority_non_pauseable_mask); for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) { - DP(BNX2X_MSG_DCB, - "pdev->params.dcbx_port_params.ets.cos_params[%d].pri_bitmask %x\n", - cos, bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask); + DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." + "cos_params[%d].pri_bitmask %x\n", cos, + bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask); - DP(BNX2X_MSG_DCB, - "pdev->params.dcbx_port_params.ets.cos_params[%d].bw_tbl %x\n", - cos, bp->dcbx_port_params.ets.cos_params[cos].bw_tbl); + DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." 
+ "cos_params[%d].bw_tbl %x\n", cos, + bp->dcbx_port_params.ets.cos_params[cos].bw_tbl); - DP(BNX2X_MSG_DCB, - "pdev->params.dcbx_port_params.ets.cos_params[%d].strict %x\n", - cos, bp->dcbx_port_params.ets.cos_params[cos].strict); + DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." + "cos_params[%d].strict %x\n", cos, + bp->dcbx_port_params.ets.cos_params[cos].strict); - DP(BNX2X_MSG_DCB, - "pdev->params.dcbx_port_params.ets.cos_params[%d].pauseable %x\n", - cos, bp->dcbx_port_params.ets.cos_params[cos].pauseable); + DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets." + "cos_params[%d].pauseable %x\n", cos, + bp->dcbx_port_params.ets.cos_params[cos].pauseable); } for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) { - DP(BNX2X_MSG_DCB, - "pfc_fw_cfg->traffic_type_to_priority_cos[%d].priority %x\n", - pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority); + DP(NETIF_MSG_LINK, + "pfc_fw_cfg->traffic_type_to_priority_cos[%d]." + "priority %x\n", pri, + pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority); - DP(BNX2X_MSG_DCB, + DP(NETIF_MSG_LINK, "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n", pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos); } @@ -1091,7 +1119,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, help_data->num_of_pg++; } } - DP(BNX2X_MSG_DCB, + DP(NETIF_MSG_LINK, "add_traf_type %d pg_found %s num_of_pg %d\n", add_traf_type, (false == pg_found) ? "NO" : "YES", help_data->num_of_pg); @@ -1284,7 +1312,8 @@ static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp, } if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX) - BNX2X_ERR("Invalid value for pri_join_mask - could not find a priority\n"); + BNX2X_ERR("Invalid value for pri_join_mask -" + " could not find a priority\n"); cos_data->data[0].pri_join_mask = pri_mask_without_pri; cos_data->data[1].pri_join_mask = pri_tested; @@ -1597,10 +1626,8 @@ static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp, num_of_app_pri--; } - if (num_spread_of_entries) { - BNX2X_ERR("Didn't succeed to spread strict priorities\n"); + if (num_spread_of_entries) return -EINVAL; - } return 0; } @@ -1648,7 +1675,8 @@ static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp, if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) { if (bnx2x_dcbx_join_pgs(bp, ets, help_data, DCBX_COS_MAX_NUM_E3B0)) { - BNX2X_ERR("Unable to reduce the number of PGs - we will disables ETS\n"); + BNX2X_ERR("Unable to reduce the number of PGs -" + "we will disables ETS\n"); bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask); return; @@ -1748,24 +1776,24 @@ static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, if (p->pauseable && DCBX_PFC_PRI_GET_NON_PAUSE(bp, p->pri_bitmask) != 0) - BNX2X_ERR("Inconsistent config for pausable COS %d\n", - i); + BNX2X_ERR("Inconsistent config for " + "pausable COS %d\n", i); if (!p->pauseable && DCBX_PFC_PRI_GET_PAUSE(bp, p->pri_bitmask) != 0) - BNX2X_ERR("Inconsistent config for nonpausable COS %d\n", - i); + BNX2X_ERR("Inconsistent config for " + "nonpausable COS %d\n", i); } } if (p->pauseable) - DP(BNX2X_MSG_DCB, "COS %d PAUSABLE prijoinmask 0x%x\n", + DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n", i, cos_data.data[i].pri_join_mask); else - DP(BNX2X_MSG_DCB, - "COS %d NONPAUSABLE prijoinmask 0x%x\n", - i, cos_data.data[i].pri_join_mask); + DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask " + "0x%x\n", + i, cos_data.data[i].pri_join_mask); } bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos ; @@ -1780,7 +1808,7 @@ static void 
bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) { set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i); - DP(BNX2X_MSG_DCB, "set_configuration_ets_pg[%d] = 0x%x\n", + DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n", i, set_configuration_ets_pg[i]); } } @@ -1876,14 +1904,14 @@ static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp) static u8 bnx2x_dcbnl_get_state(struct net_device *netdev) { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcb_state); + DP(NETIF_MSG_LINK, "state = %d\n", bp->dcb_state); return bp->dcb_state; } static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state) { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off"); + DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off"); bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled); return 0; @@ -1893,7 +1921,7 @@ static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr) { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "GET-PERM-ADDR\n"); + DP(NETIF_MSG_LINK, "GET-PERM-ADDR\n"); /* first the HW mac address */ memcpy(perm_addr, netdev->dev_addr, netdev->addr_len); @@ -1910,7 +1938,7 @@ static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio, { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, pgid); + DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, pgid); if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES) return; @@ -1935,7 +1963,7 @@ static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev, int pgid, u8 bw_pct) { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "pgid[%d] = %d\n", pgid, bw_pct); + DP(NETIF_MSG_LINK, "pgid[%d] = %d\n", pgid, bw_pct); if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES) return; @@ -1949,14 +1977,14 @@ static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio, u8 up_map) { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n"); + DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n"); } static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev, int pgid, u8 bw_pct) { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n"); + DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n"); } static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio, @@ -1964,7 +1992,7 @@ static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio, u8 *up_map) { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "prio = %d\n", prio); + DP(NETIF_MSG_LINK, "prio = %d\n", prio); /** * bw_pct ingnored - band-width percentage devision between user @@ -1990,7 +2018,7 @@ static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct) { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "pgid = %d\n", pgid); + DP(NETIF_MSG_LINK, "pgid = %d\n", pgid); *bw_pct = 0; @@ -2005,7 +2033,7 @@ static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio, u8 *up_map) { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n"); + DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n"); *prio_type = *pgid = *bw_pct = *up_map = 0; } @@ -2014,7 +2042,7 @@ static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct) { struct bnx2x *bp = netdev_priv(netdev); - 
DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n"); + DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n"); *bw_pct = 0; } @@ -2023,7 +2051,7 @@ static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 setting) { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, setting); + DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, setting); if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES) return; @@ -2038,7 +2066,7 @@ static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting) { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "prio = %d\n", prio); + DP(NETIF_MSG_LINK, "prio = %d\n", prio); *setting = 0; @@ -2053,21 +2081,21 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev) struct bnx2x *bp = netdev_priv(netdev); int rc = 0; - DP(BNX2X_MSG_DCB, "SET-ALL\n"); + DP(NETIF_MSG_LINK, "SET-ALL\n"); if (!bnx2x_dcbnl_set_valid(bp)) return 1; if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - netdev_err(bp->dev, - "Handling parity error recovery. Try again later\n"); + netdev_err(bp->dev, "Handling parity error recovery. " + "Try again later\n"); return 1; } if (netif_running(bp->dev)) { bnx2x_nic_unload(bp, UNLOAD_NORMAL); rc = bnx2x_nic_load(bp, LOAD_NORMAL); } - DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc); + DP(NETIF_MSG_LINK, "set_dcbx_params done (%d)\n", rc); if (rc) return 1; @@ -2106,16 +2134,13 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) *cap = BNX2X_DCBX_CAPS; break; default: - BNX2X_ERR("Non valid capability ID\n"); rval = -EINVAL; break; } - } else { - DP(BNX2X_MSG_DCB, "DCB disabled\n"); + } else rval = -EINVAL; - } - DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap); + DP(NETIF_MSG_LINK, "capid %d:%x\n", capid, *cap); return rval; } @@ -2124,7 +2149,7 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num) struct bnx2x *bp = netdev_priv(netdev); u8 rval = 0; - DP(BNX2X_MSG_DCB, "tcid %d\n", tcid); + DP(NETIF_MSG_LINK, "tcid %d\n", tcid); if (bp->dcb_state) { switch (tcid) { @@ -2137,14 +2162,11 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num) DCBX_COS_MAX_NUM_E2; break; default: - BNX2X_ERR("Non valid TC-ID\n"); rval = -EINVAL; break; } - } else { - DP(BNX2X_MSG_DCB, "DCB disabled\n"); + } else rval = -EINVAL; - } return rval; } @@ -2152,14 +2174,14 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num) static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num) { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "num tcs = %d; Not supported\n", num); + DP(NETIF_MSG_LINK, "num tcs = %d; Not supported\n", num); return -EINVAL; } static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); + DP(NETIF_MSG_LINK, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); if (!bp->dcb_state) return 0; @@ -2170,7 +2192,7 @@ static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state) { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off"); + DP(NETIF_MSG_LINK, "state = %s\n", state ? 
"on" : "off"); if (!bnx2x_dcbnl_set_valid(bp)) return; @@ -2247,11 +2269,9 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up) bnx2x_admin_app_set_ent( &bp->dcbx_config_params.admin_priority_app_table[ff], idtype, idval, up); - else { + else /* app table is full */ - BNX2X_ERR("Application table is too large\n"); return -EBUSY; - } /* up configured, if not 0 make sure feature is enabled */ if (up) @@ -2265,13 +2285,11 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype, { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "app_type %d, app_id %x, prio bitmap %d\n", + DP(NETIF_MSG_LINK, "app_type %d, app_id %x, prio bitmap %d\n", idtype, idval, up); - if (!bnx2x_dcbnl_set_valid(bp)) { - DP(BNX2X_MSG_DCB, "dcbnl call not valid\n"); + if (!bnx2x_dcbnl_set_valid(bp)) return -EINVAL; - } /* verify idtype */ switch (idtype) { @@ -2279,7 +2297,6 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype, case DCB_APP_IDTYPE_PORTNUM: break; default: - DP(BNX2X_MSG_DCB, "Wrong ID type\n"); return -EINVAL; } return bnx2x_set_admin_app_up(bp, idtype, idval, up); @@ -2301,13 +2318,13 @@ static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev) static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state) { struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "state = %02x\n", state); + DP(NETIF_MSG_LINK, "state = %02x\n", state); /* set dcbx mode */ if ((state & BNX2X_DCBX_CAPS) != state) { - BNX2X_ERR("Requested DCBX mode %x is beyond advertised capabilities\n", - state); + BNX2X_ERR("Requested DCBX mode %x is beyond advertised " + "capabilities\n", state); return 1; } @@ -2331,7 +2348,7 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, struct bnx2x *bp = netdev_priv(netdev); u8 rval = 0; - DP(BNX2X_MSG_DCB, "featid %d\n", featid); + DP(NETIF_MSG_LINK, "featid %d\n", featid); if (bp->dcb_state) { *flags = 0; @@ -2357,14 +2374,11 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, *flags |= DCB_FEATCFG_ERROR; break; default: - BNX2X_ERR("Non valid featrue-ID\n"); rval = -EINVAL; break; } - } else { - DP(BNX2X_MSG_DCB, "DCB disabled\n"); + } else rval = -EINVAL; - } return rval; } @@ -2375,7 +2389,7 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid, struct bnx2x *bp = netdev_priv(netdev); u8 rval = 0; - DP(BNX2X_MSG_DCB, "featid = %d flags = %02x\n", featid, flags); + DP(NETIF_MSG_LINK, "featid = %d flags = %02x\n", featid, flags); /* ignore the 'advertise' flag */ if (bnx2x_dcbnl_set_valid(bp)) { @@ -2398,14 +2412,11 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid, flags & DCB_FEATCFG_WILLING ? 
1 : 0; break; default: - BNX2X_ERR("Non valid featrue-ID\n"); rval = -EINVAL; break; } - } else { - DP(BNX2X_MSG_DCB, "dcbnl call not valid\n"); + } else rval = -EINVAL; - } return rval; } @@ -2416,7 +2427,7 @@ static int bnx2x_peer_appinfo(struct net_device *netdev, int i; struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "APP-INFO\n"); + DP(NETIF_MSG_LINK, "APP-INFO\n"); info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0; info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0; @@ -2435,7 +2446,7 @@ static int bnx2x_peer_apptable(struct net_device *netdev, int i, j; struct bnx2x *bp = netdev_priv(netdev); - DP(BNX2X_MSG_DCB, "APP-TABLE\n"); + DP(NETIF_MSG_LINK, "APP-TABLE\n"); for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { struct dcbx_app_priority_entry *ent = diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 2cc0a1703970..858d1b5433de 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -280,7 +280,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->maxtxpkt = 0; cmd->maxrxpkt = 0; - DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" + DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" " duplex %d port %d phy_address %d transceiver %d\n" " autoneg %d maxtxpkt %d maxrxpkt %d\n", @@ -301,7 +301,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (IS_MF_SD(bp)) return 0; - DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" + DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" " supported 0x%x advertising 0x%x speed %u\n" " duplex %d port %d phy_address %d transceiver %d\n" " autoneg %d maxtxpkt %d maxrxpkt %d\n", @@ -325,17 +325,18 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) line_speed = 10000; if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) { - DP(BNX2X_MSG_ETHTOOL, - "To set speed BC %X or higher is required, please upgrade BC\n", - REQ_BC_VER_4_SET_MF_BW); + BNX2X_DEV_INFO("To set speed BC %X or higher " + "is required, please upgrade BC\n", + REQ_BC_VER_4_SET_MF_BW); return -EINVAL; } part = (speed * 100) / line_speed; if (line_speed < speed || !part) { - DP(BNX2X_MSG_ETHTOOL, - "Speed setting should be in a range from 1%% to 100%% of actual line speed\n"); + BNX2X_DEV_INFO("Speed setting should be in a range " + "from 1%% to 100%% " + "of actual line speed\n"); return -EINVAL; } @@ -357,7 +358,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (!(bp->port.supported[0] & SUPPORTED_TP || bp->port.supported[1] & SUPPORTED_TP)) { - DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n"); + DP(NETIF_MSG_LINK, "Unsupported port type\n"); return -EINVAL; } bp->link_params.multi_phy_config &= @@ -377,7 +378,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (!(bp->port.supported[0] & SUPPORTED_FIBRE || bp->port.supported[1] & SUPPORTED_FIBRE)) { - DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n"); + DP(NETIF_MSG_LINK, "Unsupported port type\n"); return -EINVAL; } bp->link_params.multi_phy_config &= @@ -391,7 +392,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; break; default: - DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n"); + DP(NETIF_MSG_LINK, "Unsupported port type\n"); return -EINVAL; } /* Save new config in case 
command complete successully */ @@ -400,7 +401,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) cfg_idx = bnx2x_get_link_cfg_idx(bp); /* Restore old config in case command failed */ bp->link_params.multi_phy_config = old_multi_phy_config; - DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx); + DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx); if (cmd->autoneg == AUTONEG_ENABLE) { u32 an_supported_speed = bp->port.supported[cfg_idx]; @@ -409,14 +410,14 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) an_supported_speed |= (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full); if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) { - DP(BNX2X_MSG_ETHTOOL, "Autoneg not supported\n"); + DP(NETIF_MSG_LINK, "Autoneg not supported\n"); return -EINVAL; } /* advertise the requested speed and duplex if supported */ if (cmd->advertising & ~an_supported_speed) { - DP(BNX2X_MSG_ETHTOOL, - "Advertisement parameters are not supported\n"); + DP(NETIF_MSG_LINK, "Advertisement parameters " + "are not supported\n"); return -EINVAL; } @@ -465,7 +466,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (cmd->duplex == DUPLEX_FULL) { if (!(bp->port.supported[cfg_idx] & SUPPORTED_10baseT_Full)) { - DP(BNX2X_MSG_ETHTOOL, + DP(NETIF_MSG_LINK, "10M full not supported\n"); return -EINVAL; } @@ -475,7 +476,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } else { if (!(bp->port.supported[cfg_idx] & SUPPORTED_10baseT_Half)) { - DP(BNX2X_MSG_ETHTOOL, + DP(NETIF_MSG_LINK, "10M half not supported\n"); return -EINVAL; } @@ -489,7 +490,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (cmd->duplex == DUPLEX_FULL) { if (!(bp->port.supported[cfg_idx] & SUPPORTED_100baseT_Full)) { - DP(BNX2X_MSG_ETHTOOL, + DP(NETIF_MSG_LINK, "100M full not supported\n"); return -EINVAL; } @@ -499,7 +500,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) } else { if (!(bp->port.supported[cfg_idx] & SUPPORTED_100baseT_Half)) { - DP(BNX2X_MSG_ETHTOOL, + DP(NETIF_MSG_LINK, "100M half not supported\n"); return -EINVAL; } @@ -511,15 +512,13 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) case SPEED_1000: if (cmd->duplex != DUPLEX_FULL) { - DP(BNX2X_MSG_ETHTOOL, - "1G half not supported\n"); + DP(NETIF_MSG_LINK, "1G half not supported\n"); return -EINVAL; } if (!(bp->port.supported[cfg_idx] & SUPPORTED_1000baseT_Full)) { - DP(BNX2X_MSG_ETHTOOL, - "1G full not supported\n"); + DP(NETIF_MSG_LINK, "1G full not supported\n"); return -EINVAL; } @@ -529,14 +528,14 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) case SPEED_2500: if (cmd->duplex != DUPLEX_FULL) { - DP(BNX2X_MSG_ETHTOOL, + DP(NETIF_MSG_LINK, "2.5G half not supported\n"); return -EINVAL; } if (!(bp->port.supported[cfg_idx] & SUPPORTED_2500baseX_Full)) { - DP(BNX2X_MSG_ETHTOOL, + DP(NETIF_MSG_LINK, "2.5G full not supported\n"); return -EINVAL; } @@ -547,15 +546,13 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) case SPEED_10000: if (cmd->duplex != DUPLEX_FULL) { - DP(BNX2X_MSG_ETHTOOL, - "10G half not supported\n"); + DP(NETIF_MSG_LINK, "10G half not supported\n"); return -EINVAL; } if (!(bp->port.supported[cfg_idx] & SUPPORTED_10000baseT_Full)) { - DP(BNX2X_MSG_ETHTOOL, - "10G full not supported\n"); + DP(NETIF_MSG_LINK, "10G full not supported\n"); return -EINVAL; } @@ -564,7 +561,7 @@ static 
int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) break; default: - DP(BNX2X_MSG_ETHTOOL, "Unsupported speed %u\n", speed); + DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed); return -EINVAL; } @@ -573,7 +570,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) bp->port.advertising[cfg_idx] = advertising; } - DP(BNX2X_MSG_ETHTOOL, "req_line_speed %d\n" + DP(NETIF_MSG_LINK, "req_line_speed %d\n" " req_duplex %d advertising 0x%x\n", bp->link_params.req_line_speed[cfg_idx], bp->link_params.req_duplex[cfg_idx], @@ -853,16 +850,13 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bnx2x *bp = netdev_priv(dev); - if (wol->wolopts & ~WAKE_MAGIC) { - DP(BNX2X_MSG_ETHTOOL, "WOL not supproted\n"); + if (wol->wolopts & ~WAKE_MAGIC) return -EINVAL; - } if (wol->wolopts & WAKE_MAGIC) { - if (bp->flags & NO_WOL_FLAG) { - DP(BNX2X_MSG_ETHTOOL, "WOL not supproted\n"); + if (bp->flags & NO_WOL_FLAG) return -EINVAL; - } + bp->wol = 1; } else bp->wol = 0; @@ -961,8 +955,7 @@ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) } if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "cannot get access to nvram interface\n"); + DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n"); return -EBUSY; } @@ -993,8 +986,7 @@ static int bnx2x_release_nvram_lock(struct bnx2x *bp) } if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "cannot free access to nvram interface\n"); + DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n"); return -EBUSY; } @@ -1068,9 +1060,7 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val, break; } } - if (rc == -EBUSY) - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "nvram read timeout expired\n"); + return rc; } @@ -1082,15 +1072,15 @@ static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, __be32 val; if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + DP(BNX2X_MSG_NVM, "Invalid parameter: offset 0x%x buf_size 0x%x\n", offset, buf_size); return -EINVAL; } if (offset + buf_size > bp->common.flash_size) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n", + DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" + " buf_size (0x%x) > flash_size (0x%x)\n", offset, buf_size, bp->common.flash_size); return -EINVAL; } @@ -1135,13 +1125,10 @@ static int bnx2x_get_eeprom(struct net_device *dev, struct bnx2x *bp = netdev_priv(dev); int rc; - if (!netif_running(dev)) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "cannot access eeprom when the interface is down\n"); + if (!netif_running(dev)) return -EAGAIN; - } - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" + DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, eeprom->len, eeprom->len); @@ -1190,9 +1177,6 @@ static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, } } - if (rc == -EBUSY) - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "nvram write timeout expired\n"); return rc; } @@ -1207,8 +1191,8 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, __be32 val; if (offset + buf_size > bp->common.flash_size) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n", + DP(BNX2X_MSG_NVM, "Invalid 
parameter: offset (0x%x) +" + " buf_size (0x%x) > flash_size (0x%x)\n", offset, buf_size, bp->common.flash_size); return -EINVAL; } @@ -1256,15 +1240,15 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, return bnx2x_nvram_write1(bp, offset, data_buf, buf_size); if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + DP(BNX2X_MSG_NVM, "Invalid parameter: offset 0x%x buf_size 0x%x\n", offset, buf_size); return -EINVAL; } if (offset + buf_size > bp->common.flash_size) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n", + DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +" + " buf_size (0x%x) > flash_size (0x%x)\n", offset, buf_size, bp->common.flash_size); return -EINVAL; } @@ -1312,13 +1296,10 @@ static int bnx2x_set_eeprom(struct net_device *dev, int port = BP_PORT(bp); int rc = 0; u32 ext_phy_config; - if (!netif_running(dev)) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "cannot access eeprom when the interface is down\n"); + if (!netif_running(dev)) return -EAGAIN; - } - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" + DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, eeprom->len, eeprom->len); @@ -1327,11 +1308,8 @@ static int bnx2x_set_eeprom(struct net_device *dev, /* PHY eeprom can be accessed only by the PMF */ if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) && - !bp->port.pmf) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "wrong magic or interface is not pmf\n"); + !bp->port.pmf) return -EINVAL; - } ext_phy_config = SHMEM_RD(bp, @@ -1443,8 +1421,8 @@ static int bnx2x_set_ringparam(struct net_device *dev, struct bnx2x *bp = netdev_priv(dev); if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - DP(BNX2X_MSG_ETHTOOL, - "Handling parity error recovery. Try again later\n"); + netdev_err(dev, "Handling parity error recovery. " + "Try again later\n"); return -EAGAIN; } @@ -1452,10 +1430,8 @@ static int bnx2x_set_ringparam(struct net_device *dev, (ering->rx_pending < (bp->disable_tpa ? 
MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) || (ering->tx_pending > MAX_TX_AVAIL) || - (ering->tx_pending <= MAX_SKB_FRAGS + 4)) { - DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); + (ering->tx_pending <= MAX_SKB_FRAGS + 4)) return -EINVAL; - } bp->rx_ring_size = ering->rx_pending; bp->tx_ring_size = ering->tx_pending; @@ -1483,7 +1459,7 @@ static void bnx2x_get_pauseparam(struct net_device *dev, epause->tx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_TX) == BNX2X_FLOW_CTRL_TX); - DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n" + DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" " autoneg %d rx_pause %d tx_pause %d\n", epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); } @@ -1496,7 +1472,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev, if (IS_MF(bp)) return 0; - DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n" + DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" " autoneg %d rx_pause %d tx_pause %d\n", epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); @@ -1513,7 +1489,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev, if (epause->autoneg) { if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) { - DP(BNX2X_MSG_ETHTOOL, "autoneg not supported\n"); + DP(NETIF_MSG_LINK, "autoneg not supported\n"); return -EINVAL; } @@ -1523,7 +1499,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev, } } - DP(BNX2X_MSG_ETHTOOL, + DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]); if (netif_running(dev)) { @@ -1655,11 +1631,8 @@ static int bnx2x_test_registers(struct bnx2x *bp) { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 } }; - if (!netif_running(bp->dev)) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "cannot access eeprom when the interface is down\n"); + if (!netif_running(bp->dev)) return rc; - } if (CHIP_IS_E1(bp)) hw = BNX2X_CHIP_MASK_E1; @@ -1704,7 +1677,7 @@ static int bnx2x_test_registers(struct bnx2x *bp) /* verify value is as expected */ if ((val & mask) != (wr_val & mask)) { - DP(BNX2X_MSG_ETHTOOL, + DP(NETIF_MSG_HW, "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n", offset, val, wr_val, mask); goto test_reg_exit; @@ -1758,11 +1731,8 @@ static int bnx2x_test_memory(struct bnx2x *bp) { NULL, 0xffffffff, {0, 0, 0, 0} } }; - if (!netif_running(bp->dev)) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "cannot access eeprom when the interface is down\n"); + if (!netif_running(bp->dev)) return rc; - } if (CHIP_IS_E1(bp)) index = BNX2X_CHIP_E1_OFST; @@ -1777,7 +1747,7 @@ static int bnx2x_test_memory(struct bnx2x *bp) for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { val = REG_RD(bp, prty_tbl[i].offset); if (val & ~(prty_tbl[i].hw_mask[index])) { - DP(BNX2X_MSG_ETHTOOL, + DP(NETIF_MSG_HW, "%s is 0x%x\n", prty_tbl[i].name, val); goto test_mem_exit; } @@ -1792,7 +1762,7 @@ static int bnx2x_test_memory(struct bnx2x *bp) for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { val = REG_RD(bp, prty_tbl[i].offset); if (val & ~(prty_tbl[i].hw_mask[index])) { - DP(BNX2X_MSG_ETHTOOL, + DP(NETIF_MSG_HW, "%s is 0x%x\n", prty_tbl[i].name, val); goto test_mem_exit; } @@ -1813,7 +1783,7 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes) msleep(20); if (cnt <= 0 && bnx2x_link_test(bp, is_serdes)) - DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n"); + DP(NETIF_MSG_LINK, "Timeout waiting for link up\n"); } } @@ -1863,7 +1833,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) bnx2x_phy_init(&bp->link_params, &bp->link_vars); break; default: - DP(BNX2X_MSG_ETHTOOL, "Command 
parameters not supported\n"); return -EINVAL; } @@ -1872,7 +1841,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN); skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size); if (!skb) { - DP(BNX2X_MSG_ETHTOOL, "Can't allocate skb\n"); rc = -ENOMEM; goto test_loopback_exit; } @@ -1887,7 +1855,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { rc = -ENOMEM; dev_kfree_skb(skb); - DP(BNX2X_MSG_ETHTOOL, "Unable to map SKB\n"); + BNX2X_ERR("Unable to map SKB\n"); goto test_loopback_exit; } @@ -2017,13 +1985,13 @@ static int bnx2x_test_loopback(struct bnx2x *bp) res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK); if (res) { - DP(BNX2X_MSG_ETHTOOL, " PHY loopback failed (res %d)\n", res); + DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res); rc |= BNX2X_PHY_LOOPBACK_FAILED; } res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK); if (res) { - DP(BNX2X_MSG_ETHTOOL, " MAC loopback failed (res %d)\n", res); + DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res); rc |= BNX2X_MAC_LOOPBACK_FAILED; } @@ -2059,7 +2027,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp) buf = kmalloc(0x350, GFP_KERNEL); if (!buf) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n"); + DP(NETIF_MSG_PROBE, "kmalloc failed\n"); rc = -ENOMEM; goto test_nvram_exit; } @@ -2067,15 +2035,13 @@ static int bnx2x_test_nvram(struct bnx2x *bp) rc = bnx2x_nvram_read(bp, 0, data, 4); if (rc) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "magic value read (rc %d)\n", rc); + DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc); goto test_nvram_exit; } magic = be32_to_cpu(buf[0]); if (magic != 0x669955aa) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "wrong magic value (0x%08x)\n", magic); + DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic); rc = -ENODEV; goto test_nvram_exit; } @@ -2085,15 +2051,15 @@ static int bnx2x_test_nvram(struct bnx2x *bp) rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data, nvram_tbl[i].size); if (rc) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + DP(NETIF_MSG_PROBE, "nvram_tbl[%d] read data (rc %d)\n", i, rc); goto test_nvram_exit; } crc = ether_crc_le(nvram_tbl[i].size, data); if (crc != CRC32_RESIDUAL) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "nvram_tbl[%d] wrong crc value (0x%08x)\n", i, crc); + DP(NETIF_MSG_PROBE, + "nvram_tbl[%d] crc value (0x%08x)\n", i, crc); rc = -ENODEV; goto test_nvram_exit; } @@ -2107,13 +2073,10 @@ static int bnx2x_test_nvram(struct bnx2x *bp) /* Send an EMPTY ramrod on the first queue */ static int bnx2x_test_intr(struct bnx2x *bp) { - struct bnx2x_queue_state_params params = {NULL}; + struct bnx2x_queue_state_params params = {0}; - if (!netif_running(bp->dev)) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "cannot access eeprom when the interface is down\n"); + if (!netif_running(bp->dev)) return -ENODEV; - } params.q_obj = &bp->fp->q_obj; params.cmd = BNX2X_Q_CMD_EMPTY; @@ -2129,8 +2092,8 @@ static void bnx2x_self_test(struct net_device *dev, struct bnx2x *bp = netdev_priv(dev); u8 is_serdes; if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - netdev_err(bp->dev, - "Handling parity error recovery. Try again later\n"); + netdev_err(bp->dev, "Handling parity error recovery. 
" + "Try again later\n"); etest->flags |= ETH_TEST_FL_FAILED; return; } @@ -2343,16 +2306,11 @@ static int bnx2x_set_phys_id(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); - if (!netif_running(dev)) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "cannot access eeprom when the interface is down\n"); + if (!netif_running(dev)) return -EAGAIN; - } - if (!bp->port.pmf) { - DP(BNX2X_MSG_ETHTOOL, "Interface is not pmf\n"); + if (!bp->port.pmf) return -EOPNOTSUPP; - } switch (state) { case ETHTOOL_ID_ACTIVE: @@ -2389,7 +2347,6 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, return 0; default: - DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); return -EOPNOTSUPP; } } diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index cd6dfa9eaa3a..e5c5982ae06d 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -243,6 +243,18 @@ (IRO[48].base + ((funcId) * IRO[48].m1)) #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 +/** +* This file defines HSI constants for the ETH flow +*/ +#ifdef _EVEREST_MICROCODE +#include "Microcode\Generated\DataTypes\eth_rx_bd.h" +#include "Microcode\Generated\DataTypes\eth_tx_bd.h" +#include "Microcode\Generated\DataTypes\eth_rx_cqe.h" +#include "Microcode\Generated\DataTypes\eth_rx_sge.h" +#include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h" +#endif + + /* Ethernet Ring parameters */ #define X_ETH_LOCAL_RING_SIZE 13 #define FIRST_BD_IN_PKT 0 diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 5d71b7d43237..a1413ad7757d 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1129,8 +1129,6 @@ struct shm_dev_info { /* size */ #define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) -#define MFW_TRACE_SIGNATURE 0x54524342 - /**************************************************************************** * Driver <-> FW Mailbox * ****************************************************************************/ @@ -1835,9 +1833,6 @@ struct lldp_local_mib { #define DCBX_LOCAL_PFC_MISMATCH 0x00000010 #define DCBX_LOCAL_APP_MISMATCH 0x00000020 #define DCBX_REMOTE_MIB_ERROR 0x00000040 - #define DCBX_REMOTE_ETS_TLV_NOT_FOUND 0x00000080 - #define DCBX_REMOTE_PFC_TLV_NOT_FOUND 0x00000100 - #define DCBX_REMOTE_APP_TLV_NOT_FOUND 0x00000200 struct dcbx_features features; u32 suffix_seq_num; }; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index f7f9aa807264..a743a5fcb22c 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -375,6 +375,9 @@ void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx); for (i = 0; i < (sizeof(struct dmae_command)/4); i++) { REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i)); + + DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n", + idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); } REG_WR(bp, dmae_reg_go_c[idx], 1); } @@ -439,6 +442,10 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, int cnt = CHIP_REV_IS_SLOW(bp) ? 
(400000) : 4000; int rc = 0; + DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n", + bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], + bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); + /* * Lock the dmae channel. Disable BHs to prevent a dead-lock * as long as this code is called both from syscall context and @@ -455,6 +462,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, /* wait for completion */ udelay(5); while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { + DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp); if (!cnt || (bp->recovery_state != BNX2X_RECOVERY_DONE && @@ -471,6 +479,10 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, rc = DMAE_PCI_ERROR; } + DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n", + bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], + bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); + unlock: spin_unlock_bh(&bp->dmae_lock); return rc; @@ -484,6 +496,9 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, if (!bp->dmae_ready) { u32 *data = bnx2x_sp(bp, wb_data[0]); + DP(BNX2X_MSG_OFF, + "DMAE is not ready (dst_addr %08x len32 %d) using indirect\n", + dst_addr, len32); if (CHIP_IS_E1(bp)) bnx2x_init_ind_wr(bp, dst_addr, data, len32); else @@ -515,10 +530,13 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) u32 *data = bnx2x_sp(bp, wb_data[0]); int i; - if (CHIP_IS_E1(bp)) + if (CHIP_IS_E1(bp)) { + DP(BNX2X_MSG_OFF, + "DMAE is not ready (src_addr %08x len32 %d) using indirect\n", + src_addr, len32); for (i = 0; i < len32; i++) data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4); - else + } else for (i = 0; i < len32; i++) data[i] = REG_RD(bp, src_addr + i*4); @@ -603,7 +621,8 @@ static int bnx2x_mc_assert(struct bnx2x *bp) XSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", + BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x" + " 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { @@ -630,7 +649,8 @@ static int bnx2x_mc_assert(struct bnx2x *bp) TSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", + BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x" + " 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { @@ -657,7 +677,8 @@ static int bnx2x_mc_assert(struct bnx2x *bp) CSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", + BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x" + " 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { @@ -684,7 +705,8 @@ static int bnx2x_mc_assert(struct bnx2x *bp) USTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", + BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x" + " 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { @@ -713,23 +735,13 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) - BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val); + printk("%s" "MCP PC at 0x%x\n", lvl, val); if (BP_PATH(bp) == 0) trace_shmem_base = bp->common.shmem_base; else trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); - addr = trace_shmem_base - 0x800; - - /* validate TRCB signature */ 
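/*
 * Illustrative sketch only, not the driver's actual code: the hunk above
 * removes the MFW_TRACE_SIGNATURE define and the check that validated it
 * before the cyclic trace-buffer pointer was read in bnx2x_fw_dump_lvl().
 * The pattern being dropped looks roughly like the standalone sketch below:
 * a 32-bit signature word at the start of the trace area, followed by the
 * ring pointer.  All names here (fw_trace_hdr, FW_TRACE_SIG, read_word,
 * fw_trace_get_ptr) are hypothetical stand-ins, not kernel APIs.
 */
#include <stdint.h>
#include <stdio.h>

#define FW_TRACE_SIG 0x54524342u	/* same value as the removed define; spells "TRCB" */

struct fw_trace_hdr {
	uint32_t signature;	/* must match FW_TRACE_SIG before trusting the rest */
	uint32_t cyclic_ptr;	/* offset of the most recent entry in the ring */
};

/* stand-in for a register/shared-memory read helper */
static uint32_t read_word(const uint32_t *base, unsigned int idx)
{
	return base[idx];
}

/* return the cyclic pointer, or 0 if the header does not look valid */
static uint32_t fw_trace_get_ptr(const uint32_t *trace_base)
{
	if (read_word(trace_base, 0) != FW_TRACE_SIG) {
		fprintf(stderr, "trace buffer signature is missing\n");
		return 0;
	}
	return read_word(trace_base, 1);
}

int main(void)
{
	uint32_t fake_area[2] = { FW_TRACE_SIG, 0x1234 };

	printf("cyclic ptr = 0x%x\n", (unsigned int)fw_trace_get_ptr(fake_area));
	return 0;
}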
- mark = REG_RD(bp, addr); - if (mark != MFW_TRACE_SIGNATURE) { - BNX2X_ERR("Trace buffer signature is missing."); - return ; - } - - /* read cyclic buffer pointer */ - addr += 4; + addr = trace_shmem_base - 0x0800 + 4; mark = REG_RD(bp, addr); mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) + ((mark + 0x3) & ~0x3) - 0x08000000; @@ -775,7 +787,8 @@ void bnx2x_panic_dump(struct bnx2x *bp) /* Indices */ /* Common */ - BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", + BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)" + " spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", bp->def_idx, bp->def_att_idx, bp->attn_state, bp->spq_prod_idx, bp->stats_counter); BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", @@ -822,11 +835,14 @@ void bnx2x_panic_dump(struct bnx2x *bp) struct bnx2x_fp_txdata txdata; /* Rx */ - BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", + BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)" + " rx_comp_prod(0x%x)" + " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", i, fp->rx_bd_prod, fp->rx_bd_cons, fp->rx_comp_prod, fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); - BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n", + BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)" + " fp_hc_idx(0x%x)\n", fp->rx_sge_prod, fp->last_max_sge, le16_to_cpu(fp->fp_hc_idx)); @@ -834,7 +850,9 @@ void bnx2x_panic_dump(struct bnx2x *bp) for_each_cos_in_tx_queue(fp, cos) { txdata = fp->txdata[cos]; - BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n", + BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)" + " tx_bd_prod(0x%x) tx_bd_cons(0x%x)" + " *tx_cons_sb(0x%x)\n", i, txdata.tx_pkt_prod, txdata.tx_pkt_cons, txdata.tx_bd_prod, txdata.tx_bd_cons, @@ -876,7 +894,9 @@ void bnx2x_panic_dump(struct bnx2x *bp) j * sizeof(u32)); if (!CHIP_IS_E1x(bp)) { - pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n", + pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " + "vnic_id(0x%x) same_igu_sb_1b(0x%x) " + "state(0x%x)\n", sb_data_e2.common.p_func.pf_id, sb_data_e2.common.p_func.vf_id, sb_data_e2.common.p_func.vf_valid, @@ -884,7 +904,9 @@ void bnx2x_panic_dump(struct bnx2x *bp) sb_data_e2.common.same_igu_sb_1b, sb_data_e2.common.state); } else { - pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n", + pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " + "vnic_id(0x%x) same_igu_sb_1b(0x%x) " + "state(0x%x)\n", sb_data_e1x.common.p_func.pf_id, sb_data_e1x.common.p_func.vf_id, sb_data_e1x.common.p_func.vf_valid, @@ -895,17 +917,21 @@ void bnx2x_panic_dump(struct bnx2x *bp) /* SB_SMs data */ for (j = 0; j < HC_SB_MAX_SM; j++) { - pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n", - j, hc_sm_p[j].__flags, - hc_sm_p[j].igu_sb_id, - hc_sm_p[j].igu_seg_id, - hc_sm_p[j].time_to_expire, - hc_sm_p[j].timer_value); + pr_cont("SM[%d] __flags (0x%x) " + "igu_sb_id (0x%x) igu_seg_id(0x%x) " + "time_to_expire (0x%x) " + "timer_value(0x%x)\n", j, + hc_sm_p[j].__flags, + hc_sm_p[j].igu_sb_id, + hc_sm_p[j].igu_seg_id, + hc_sm_p[j].time_to_expire, + hc_sm_p[j].timer_value); } /* Indecies data */ for (j = 0; j < loop; j++) { - pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j, + pr_cont("INDEX[%d] flags (0x%x) " + "timeout (0x%x)\n", j, hc_index_p[j].flags, 
hc_index_p[j].timeout); } @@ -959,7 +985,8 @@ void bnx2x_panic_dump(struct bnx2x *bp) struct sw_tx_bd *sw_bd = &txdata->tx_buf_ring[j]; - BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n", + BNX2X_ERR("fp%d: txdata %d, " + "packet[%x]=[%p,%x]\n", i, cos, j, sw_bd->skb, sw_bd->first_bd); } @@ -969,7 +996,8 @@ void bnx2x_panic_dump(struct bnx2x *bp) for (j = start; j != end; j = TX_BD(j + 1)) { u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j]; - BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n", + BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=" + "[%x:%x:%x:%x]\n", i, cos, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]); } @@ -1205,8 +1233,6 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) { BNX2X_ERR("FW final cleanup did not succeed\n"); - DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n", - (REG_RD(bp, comp_addr))); ret = 1; } /* Zero completion for nxt FLR */ @@ -1376,8 +1402,8 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) HC_CONFIG_0_REG_ATTN_BIT_EN_0); if (!CHIP_IS_E1(bp)) { - DP(NETIF_MSG_IFUP, - "write %x to HC %d (addr 0x%x)\n", val, port, addr); + DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", + val, port, addr); REG_WR(bp, addr, val); @@ -1388,9 +1414,8 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp) if (CHIP_IS_E1(bp)) REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF); - DP(NETIF_MSG_IFUP, - "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr, - (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); + DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", + val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); REG_WR(bp, addr, val); /* @@ -1445,7 +1470,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp) IGU_PF_CONF_SINGLE_ISR_EN); } - DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n", + DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n", val, (msix ? "MSI-X" : (msi ? 
"MSI" : "INTx"))); REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); @@ -1503,8 +1528,7 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp) HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); - DP(NETIF_MSG_IFDOWN, - "write %x to HC %d (addr 0x%x)\n", + DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); /* flush all outstanding writes */ @@ -1523,7 +1547,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp) IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN); - DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val); + DP(NETIF_MSG_INTR, "write %x to IGU\n", val); /* flush all outstanding writes */ mmiowb(); @@ -1582,12 +1606,11 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) int func = BP_FUNC(bp); u32 hw_lock_control_reg; - DP(NETIF_MSG_HW | NETIF_MSG_IFUP, - "Trying to take a lock on resource %d\n", resource); + DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource); /* Validating that the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { - DP(NETIF_MSG_HW | NETIF_MSG_IFUP, + DP(NETIF_MSG_HW, "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", resource, HW_LOCK_MAX_RESOURCE_VALUE); return false; @@ -1605,8 +1628,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) if (lock_status & resource_bit) return true; - DP(NETIF_MSG_HW | NETIF_MSG_IFUP, - "Failed to get a lock on resource %d\n", resource); + DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource); return false; } @@ -1667,7 +1689,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) break; case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): - DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); + DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid); drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; break; @@ -1809,7 +1831,8 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) /* Validating that the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { - BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", + DP(NETIF_MSG_HW, + "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", resource, HW_LOCK_MAX_RESOURCE_VALUE); return -EINVAL; } @@ -1824,7 +1847,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) /* Validating that the resource is not already taken */ lock_status = REG_RD(bp, hw_lock_control_reg); if (lock_status & resource_bit) { - BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n", + DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", lock_status, resource_bit); return -EEXIST; } @@ -1839,7 +1862,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) msleep(5); } - BNX2X_ERR("Timeout\n"); + DP(NETIF_MSG_HW, "Timeout\n"); return -EAGAIN; } @@ -1855,9 +1878,12 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) int func = BP_FUNC(bp); u32 hw_lock_control_reg; + DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource); + /* Validating that the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { - BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", + DP(NETIF_MSG_HW, + "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", resource, HW_LOCK_MAX_RESOURCE_VALUE); return -EINVAL; } @@ -1872,7 +1898,7 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) /* Validating that the resource is currently taken */ lock_status = REG_RD(bp, hw_lock_control_reg); if (!(lock_status & resource_bit)) { - BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. 
unlock was called but lock wasn't taken!\n", + DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", lock_status, resource_bit); return -EFAULT; } @@ -1933,8 +1959,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) switch (mode) { case MISC_REGISTERS_GPIO_OUTPUT_LOW: - DP(NETIF_MSG_LINK, - "Set GPIO %d (shift %d) -> output low\n", + DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n", gpio_num, gpio_shift); /* clear FLOAT and set CLR */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); @@ -1942,8 +1967,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) break; case MISC_REGISTERS_GPIO_OUTPUT_HIGH: - DP(NETIF_MSG_LINK, - "Set GPIO %d (shift %d) -> output high\n", + DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n", gpio_num, gpio_shift); /* clear FLOAT and set SET */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); @@ -1951,8 +1975,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) break; case MISC_REGISTERS_GPIO_INPUT_HI_Z: - DP(NETIF_MSG_LINK, - "Set GPIO %d (shift %d) -> input\n", + DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n", gpio_num, gpio_shift); /* set FLOAT */ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); @@ -2036,18 +2059,16 @@ int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) switch (mode) { case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: - DP(NETIF_MSG_LINK, - "Clear GPIO INT %d (shift %d) -> output low\n", - gpio_num, gpio_shift); + DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> " + "output low\n", gpio_num, gpio_shift); /* clear SET and set CLR */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); break; case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: - DP(NETIF_MSG_LINK, - "Set GPIO INT %d (shift %d) -> output high\n", - gpio_num, gpio_shift); + DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> " + "output high\n", gpio_num, gpio_shift); /* clear CLR and set SET */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); @@ -2080,21 +2101,21 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) switch (mode) { case MISC_REGISTERS_SPIO_OUTPUT_LOW: - DP(NETIF_MSG_HW, "Set SPIO %d -> output low\n", spio_num); + DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num); /* clear FLOAT and set CLR */ spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); break; case MISC_REGISTERS_SPIO_OUTPUT_HIGH: - DP(NETIF_MSG_HW, "Set SPIO %d -> output high\n", spio_num); + DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num); /* clear FLOAT and set SET */ spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS); break; case MISC_REGISTERS_SPIO_INPUT_HI_Z: - DP(NETIF_MSG_HW, "Set SPIO %d -> input\n", spio_num); + DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num); /* set FLOAT */ spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); break; @@ -2536,7 +2557,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp) u32 val; bp->port.pmf = 1; - DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf); + DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); /* * We need the mb() to ensure the ordering between the writing to @@ -3117,12 +3138,12 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) * locks */ if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { - DP(BNX2X_MSG_MCP, 
"mf_cfg function disabled\n"); + DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n"); bp->flags |= MF_FUNC_DIS; bnx2x_e1h_disable(bp); } else { - DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n"); + DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); bp->flags &= ~MF_FUNC_DIS; bnx2x_e1h_enable(bp); @@ -3149,7 +3170,7 @@ static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) if (bp->spq_prod_bd == bp->spq_last_bd) { bp->spq_prod_bd = bp->spq; bp->spq_prod_idx = 0; - DP(BNX2X_MSG_SP, "end of spq\n"); + DP(NETIF_MSG_TIMER, "end of spq\n"); } else { bp->spq_prod_bd++; bp->spq_prod_idx++; @@ -3218,10 +3239,8 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, bool common = bnx2x_is_contextless_ramrod(command, cmd_type); #ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) { - BNX2X_ERR("Can't post SP when there is panic\n"); + if (unlikely(bp->panic)) return -EIO; - } #endif spin_lock_bh(&bp->spq_lock); @@ -3268,8 +3287,9 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, atomic_dec(&bp->cq_spq_left); - DP(BNX2X_MSG_SP, - "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n", + DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, + "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) " + "type(0x%x) left (CQ, EQ) (%x,%x)\n", bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) + (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, @@ -3461,8 +3481,9 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp) ext_phy_config); /* log the failure */ - netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n" - "Please contact OEM Support for assistance\n"); + netdev_err(bp->dev, "Fan Failure on Network Controller has caused" + " the driver to shutdown the card to prevent permanent" + " damage. 
Please contact OEM Support for assistance\n"); /* * Scheudle device reset (unload) @@ -3808,7 +3829,7 @@ void bnx2x_set_pf_load(struct bnx2x *bp) bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val); + DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); /* get the current counter value */ val1 = (val & mask) >> shift; @@ -3845,7 +3866,7 @@ bool bnx2x_clear_pf_load(struct bnx2x *bp) bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val); + DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); /* get the current counter value */ val1 = (val & mask) >> shift; @@ -3877,12 +3898,11 @@ static inline bool bnx2x_get_load_status(struct bnx2x *bp, int engine) BNX2X_PATH0_LOAD_CNT_SHIFT); u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val); + DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val); val = (val & mask) >> shift; - DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n", - engine, val); + DP(NETIF_MSG_HW, "load mask for engine %d = 0x%x\n", engine, val); return val != 0; } @@ -4170,8 +4190,9 @@ static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, (sig[3] & HW_PRTY_ASSERT_SET_3) || (sig[4] & HW_PRTY_ASSERT_SET_4)) { int par_num = 0; - DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" - "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", + DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: " + "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x " + "[4]:0x%08x\n", sig[0] & HW_PRTY_ASSERT_SET_0, sig[1] & HW_PRTY_ASSERT_SET_1, sig[2] & HW_PRTY_ASSERT_SET_2, @@ -4241,25 +4262,34 @@ static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); BNX2X_ERR("PGLUE hw attention 0x%x\n", val); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "ADDRESS_ERROR\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "INCORRECT_RCV_BEHAVIOR\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "WAS_ERROR_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "VF_LENGTH_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "VF_GRC_SPACE_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "VF_MSIX_BAR_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "TCPL_ERROR_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); + 
BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "TCPL_IN_TWO_RCBS_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) - BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_" + "CSSNOOP_FIFO_OVERFLOW\n"); } if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR); @@ -4267,15 +4297,19 @@ static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) - BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); + BNX2X_ERR("ATC_ATC_INT_STS_REG" + "_ATC_TCPL_TO_NOT_PEND\n"); if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) - BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); + BNX2X_ERR("ATC_ATC_INT_STS_REG_" + "ATC_GPA_MULTIPLE_HITS\n"); if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) - BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); + BNX2X_ERR("ATC_ATC_INT_STS_REG_" + "ATC_RCPL_TO_EMPTY_CNT\n"); if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) - BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); + BNX2X_ERR("ATC_ATC_INT_STS_REG_" + "ATC_IREQ_LESS_THAN_STU\n"); } if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | @@ -4334,7 +4368,8 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) if (deasserted & (1 << index)) { group_mask = &bp->attn_group[index]; - DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n", + DP(NETIF_MSG_HW, "group[%d]: %08x %08x " + "%08x %08x %08x\n", index, group_mask->sig[0], group_mask->sig[1], group_mask->sig[2], group_mask->sig[3], @@ -4494,7 +4529,6 @@ static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp, switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { case BNX2X_FILTER_MAC_PENDING: - DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); #ifdef BCM_CNIC if (cid == BNX2X_ISCSI_ETH_CID) vlan_mac_obj = &bp->iscsi_l2_mac_obj; @@ -4504,7 +4538,6 @@ static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp, break; case BNX2X_FILTER_MCAST_PENDING: - DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); /* This is only relevant for 57710 where multicast MACs are * configured as unicast MACs using the same ramrod. 
*/ @@ -4606,8 +4639,7 @@ static void bnx2x_eq_int(struct bnx2x *bp) /* handle eq element */ switch (opcode) { case EVENT_RING_OPCODE_STAT_QUERY: - DP(BNX2X_MSG_SP | BNX2X_MSG_STATS, - "got statistics comp event %d\n", + DP(NETIF_MSG_TIMER, "got statistics comp event %d\n", bp->stats_comp++); /* nothing to do with stats comp */ goto next_spqe; @@ -4634,7 +4666,7 @@ static void bnx2x_eq_int(struct bnx2x *bp) goto next_spqe; case EVENT_RING_OPCODE_STOP_TRAFFIC: - DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); + DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n"); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_TX_STOP)) break; @@ -4642,23 +4674,21 @@ static void bnx2x_eq_int(struct bnx2x *bp) goto next_spqe; case EVENT_RING_OPCODE_START_TRAFFIC: - DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); + DP(BNX2X_MSG_SP, "got START TRAFFIC\n"); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_TX_START)) break; bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_START: - DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, - "got FUNC_START ramrod\n"); + DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n"); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) break; goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_STOP: - DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, - "got FUNC_STOP ramrod\n"); + DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n"); if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) break; @@ -4740,7 +4770,7 @@ static void bnx2x_sp_task(struct work_struct *work) /* if (status == 0) */ /* BNX2X_ERR("spurious slowpath interrupt!\n"); */ - DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status); + DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status); /* HW attentions */ if (status & BNX2X_DEF_SB_ATT_IDX) { @@ -4774,7 +4804,7 @@ static void bnx2x_sp_task(struct work_struct *work) } if (unlikely(status)) - DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n", + DP(NETIF_MSG_INTR, "got an unknown interrupt! 
(status 0x%x)\n", status); bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, @@ -5052,7 +5082,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); - DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id); + DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id); /* write indecies to HW */ bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); @@ -5430,7 +5460,8 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) */ bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX); - DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", + DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) " + "cl_id %d fw_sb %d igu_sb %d\n", fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id); bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, @@ -5517,7 +5548,8 @@ static int bnx2x_gunzip_init(struct bnx2x *bp) bp->gunzip_buf = NULL; gunzip_nomem1: - BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n"); + netdev_err(bp->dev, "Cannot allocate firmware buffer for" + " un-compression\n"); return -ENOMEM; } @@ -5569,8 +5601,8 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len) bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); if (bp->gunzip_outlen & 0x3) - netdev_err(bp->dev, - "Firmware decompression error: gunzip_outlen (%d) not aligned\n", + netdev_err(bp->dev, "Firmware decompression error:" + " gunzip_outlen (%d) not aligned\n", bp->gunzip_outlen); bp->gunzip_outlen >>= 2; @@ -5989,7 +6021,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) { u32 val; - DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp)); + DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); /* * take the UNDI lock to protect undi_unload flow from accessing @@ -6313,9 +6345,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) if (sizeof(union cdu_context) != 1024) /* we currently assume that a context is 1024 bytes */ - dev_alert(&bp->pdev->dev, - "please adjust the size of cdu_context(%ld)\n", - (long)sizeof(union cdu_context)); + dev_alert(&bp->pdev->dev, "please adjust the size " + "of cdu_context(%ld)\n", + (long)sizeof(union cdu_context)); bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); val = (4 << 24) + (0 << 12) + 1024; @@ -6444,7 +6476,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) bnx2x__link_reset(bp); - DP(NETIF_MSG_HW, "starting port init port %d\n", port); + DP(BNX2X_MSG_MCP, "starting port init port %d\n", port); REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); @@ -6667,7 +6699,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) u32 main_mem_base, main_mem_size, main_mem_prty_clr; int i, main_mem_width, rc; - DP(NETIF_MSG_HW, "starting func init func %d\n", func); + DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); /* FLR cleanup - hmmm */ if (!CHIP_IS_E1x(bp)) { @@ -6927,9 +6959,9 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) val = REG_RD(bp, main_mem_prty_clr); if (val) - DP(NETIF_MSG_HW, - "Hmmm... Parity errors in HC block during function init (0x%x)!\n", - val); + DP(BNX2X_MSG_MCP, "Hmmm... 
Parity errors in HC " + "block during " + "function init (0x%x)!\n", val); /* Clear "false" parity errors in MSI-X table */ for (i = main_mem_base; @@ -7057,7 +7089,6 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) alloc_mem_err: BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, bp->fw_stats_data_sz + bp->fw_stats_req_sz); - BNX2X_ERR("Can't allocate memory\n"); return -ENOMEM; } @@ -7121,7 +7152,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp) alloc_mem_err: bnx2x_free_mem(bp); - BNX2X_ERR("Can't allocate memory\n"); return -ENOMEM; } @@ -7187,9 +7217,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) unsigned long ramrod_flags = 0; #ifdef BCM_CNIC - if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_STORAGE_SD(bp)) { - DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, - "Ignoring Zero MAC for STORAGE SD mode\n"); + if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_ISCSI_SD(bp)) { + DP(NETIF_MSG_IFUP, "Ignoring Zero MAC for iSCSI SD mode\n"); return 0; } #endif @@ -7222,13 +7251,14 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) /* falling through... */ case INT_MODE_INTx: bp->num_queues = 1 + NON_ETH_CONTEXT_USE; - BNX2X_DEV_INFO("set number of queues to 1\n"); + DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); break; default: /* Set number of queues according to bp->multi_mode value */ bnx2x_set_num_queues(bp); - BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); + DP(NETIF_MSG_IFUP, "set number of queues to %d\n", + bp->num_queues); /* if we can't use MSI-X we only need one fp, * so try to enable MSI-X with the requested number of fp's @@ -7236,9 +7266,13 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) */ if (bnx2x_enable_msix(bp)) { /* failed to enable MSI-X */ - BNX2X_DEV_INFO("Failed to enable MSI-X (%d), set number of queues to %d\n", - bp->num_queues, 1 + NON_ETH_CONTEXT_USE); - + if (bp->multi_mode) + DP(NETIF_MSG_IFUP, + "Multi requested but failed to " + "enable MSI-X (%d), " + "set number of queues to %d\n", + bp->num_queues, + 1 + NON_ETH_CONTEXT_USE); bp->num_queues = 1 + NON_ETH_CONTEXT_USE; /* Try to enable MSI */ @@ -7276,7 +7310,8 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) #endif ilt_client->end = line - 1; - DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", + DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, " + "flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, @@ -7297,8 +7332,8 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) ilt_client->end = line - 1; - DP(NETIF_MSG_IFUP, - "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", + DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, " + "flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, @@ -7316,8 +7351,8 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) line += SRC_ILT_LINES; ilt_client->end = line - 1; - DP(NETIF_MSG_IFUP, - "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", + DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, " + "flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, @@ -7338,8 +7373,8 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) line += TM_ILT_LINES; ilt_client->end = line - 1; - DP(NETIF_MSG_IFUP, - "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", + DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, " + "flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, @@ -7400,7 +7435,7 @@ 
static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp, /* set maximum number of COSs supported by this queue */ init_params->max_cos = fp->max_cos; - DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n", + DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d\n", fp->index, init_params->max_cos); /* set the context pointers queue object */ @@ -7431,8 +7466,9 @@ int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* Set Tx TX_ONLY_SETUP parameters */ bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); - DP(NETIF_MSG_IFUP, - "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n", + DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:" + "cos %d, primary cid %d, cid %d, " + "client id %d, sp-client id %d, flags %lx\n", tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX], q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id, tx_only_params->gen_params.spcl_id, tx_only_params->flags); @@ -7456,7 +7492,7 @@ int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool leading) { - struct bnx2x_queue_state_params q_params = {NULL}; + struct bnx2x_queue_state_params q_params = {0}; struct bnx2x_queue_setup_params *setup_params = &q_params.params.setup; struct bnx2x_queue_setup_tx_only_params *tx_only_params = @@ -7464,7 +7500,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, int rc; u8 tx_index; - DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index); + DP(BNX2X_MSG_SP, "setting up queue %d\n", fp->index); /* reset IGU state skip FCoE L2 queue */ if (!IS_FCOE_FP(fp)) @@ -7488,7 +7524,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, return rc; } - DP(NETIF_MSG_IFUP, "init complete\n"); + DP(BNX2X_MSG_SP, "init complete\n"); /* Now move the Queue to the SETUP state... 
*/ @@ -7539,10 +7575,10 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index) { struct bnx2x_fastpath *fp = &bp->fp[index]; struct bnx2x_fp_txdata *txdata; - struct bnx2x_queue_state_params q_params = {NULL}; + struct bnx2x_queue_state_params q_params = {0}; int rc, tx_index; - DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid); + DP(BNX2X_MSG_SP, "stopping queue %d cid %d\n", index, fp->cid); q_params.q_obj = &fp->q_obj; /* We want to wait for completion in this context */ @@ -7557,7 +7593,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index) /* ascertain this is a normal queue*/ txdata = &fp->txdata[tx_index]; - DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n", + DP(BNX2X_MSG_SP, "stopping tx-only queue %d\n", txdata->txq_index); /* send halt terminate on tx-only connection */ @@ -7715,7 +7751,7 @@ static void bnx2x_reset_port(struct bnx2x *bp) static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) { - struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_state_params func_params = {0}; /* Prepare parameters for function state transitions */ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); @@ -7730,7 +7766,7 @@ static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) static inline int bnx2x_func_stop(struct bnx2x *bp) { - struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_state_params func_params = {0}; int rc; /* Prepare parameters for function state transitions */ @@ -7749,7 +7785,8 @@ static inline int bnx2x_func_stop(struct bnx2x *bp) #ifdef BNX2X_STOP_ON_ERROR return rc; #else - BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n"); + BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry " + "transaction\n"); __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); return bnx2x_func_state_change(bp, &func_params); #endif @@ -7812,12 +7849,14 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) else { int path = BP_PATH(bp); - DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n", + DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] " + "%d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); load_count[path][0]--; load_count[path][1 + port]--; - DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n", + DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] " + "%d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); if (load_count[path][0] == 0) @@ -7880,17 +7919,16 @@ static inline int bnx2x_func_wait_started(struct bnx2x *bp) if (bnx2x_func_get_state(bp, &bp->func_obj) != BNX2X_F_STATE_STARTED) { #ifdef BNX2X_STOP_ON_ERROR - BNX2X_ERR("Wrong function state\n"); return -EBUSY; #else /* * Failed to complete the transaction in a "good way" * Force both transactions with CLR bit */ - struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_state_params func_params = {0}; - DP(NETIF_MSG_IFDOWN, - "Hmmm... unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n"); + DP(BNX2X_MSG_SP, "Hmmm... unexpected function state! 
" + "Forcing STARTED-->TX_ST0PPED-->STARTED\n"); func_params.f_obj = &bp->func_obj; __set_bit(RAMROD_DRV_CLR_ONLY, @@ -7914,7 +7952,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) int port = BP_PORT(bp); int i, rc = 0; u8 cos; - struct bnx2x_mcast_ramrod_params rparam = {NULL}; + struct bnx2x_mcast_ramrod_params rparam = {0}; u32 reset_code; /* Wait until tx fastpath tasks complete */ @@ -7941,8 +7979,8 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC, true); if (rc < 0) - BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", - rc); + BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: " + "%d\n", rc); /* Disable LLH */ if (!CHIP_IS_E1(bp)) @@ -8035,7 +8073,7 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp) { u32 val; - DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n"); + DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n"); if (CHIP_IS_E1(bp)) { int port = BP_PORT(bp); @@ -8088,7 +8126,7 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); } - DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", + DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n", close ? "closing" : "opening"); mmiowb(); } @@ -8130,7 +8168,7 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) u32 shmem; u32 validity_offset; - DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n"); + DP(NETIF_MSG_HW, "Starting\n"); /* Set `magic' bit in order to save MF config */ if (!CHIP_IS_E1(bp)) @@ -8367,8 +8405,12 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) } while (cnt-- > 0); if (cnt <= 0) { - BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n"); - BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", + DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there" + " are still" + " outstanding read requests after 1s!\n"); + DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x," + " port_is_idle_0=0x%08x," + " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2); return -EAGAIN; @@ -8462,8 +8504,8 @@ int bnx2x_leader_reset(struct bnx2x *bp) /* Try to recover after the failure */ if (bnx2x_process_kill(bp, global)) { - BNX2X_ERR("Something bad had happen on engine %d! Aii!\n", - BP_PATH(bp)); + netdev_err(bp->dev, "Something bad had happen on engine %d! " + "Aii!\n", BP_PATH(bp)); rc = -EAGAIN; goto exit_leader_reset2; } @@ -8643,7 +8685,9 @@ static void bnx2x_parity_recover(struct bnx2x *bp) if (bnx2x_nic_load(bp, LOAD_NORMAL)) { error_unrecovered++; netdev_err(bp->dev, - "Recovery failed. Power cycle needed\n"); + "Recovery failed. 
" + "Power cycle " + "needed\n"); /* Disconnect this device */ netif_device_detach(bp->dev); /* Shut down the power */ @@ -8686,7 +8730,8 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) /* if stop on error is defined no recovery flows should be executed */ #ifdef BNX2X_STOP_ON_ERROR - BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" + BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined " + "so reset not done to allow debug dump,\n" "you will need to reboot when done\n"); goto sp_rtnl_not_reset; #endif @@ -8729,7 +8774,7 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) * damage */ if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) { - DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n"); + DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n"); netif_device_detach(bp->dev); bnx2x_close(bp->dev); } @@ -8957,8 +9002,6 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) bp->pfid = bp->pf_num; /* 0..7 */ } - BNX2X_DEV_INFO("pf_id: %x", bp->pfid); - bp->link_params.chip_id = bp->common.chip_id; BNX2X_DEV_INFO("chip ID is 0x%x\n", id); @@ -9016,8 +9059,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) if (val < BNX2X_BC_VER) { /* for now only warn * later we might need to enforce this */ - BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n", - BNX2X_BC_VER, val); + BNX2X_ERR("This driver needs bc_ver %X but found %X, " + "please upgrade BC\n", BNX2X_BC_VER, val); } bp->link_params.feature_config_flags |= (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? @@ -9158,7 +9201,8 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, } if (!(bp->port.supported[0] || bp->port.supported[1])) { - BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n", + BNX2X_ERR("NVRAM config error. BAD phy config." + "PHY1 config 0x%x, PHY2 config 0x%x\n", SHMEM_RD(bp, dev_info.port_hw_config[port].external_phy_config), SHMEM_RD(bp, @@ -9270,7 +9314,9 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) (ADVERTISED_10baseT_Full | ADVERTISED_TP); } else { - BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; @@ -9287,7 +9333,9 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) (ADVERTISED_10baseT_Half | ADVERTISED_TP); } else { - BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; @@ -9303,7 +9351,9 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) (ADVERTISED_100baseT_Full | ADVERTISED_TP); } else { - BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; @@ -9321,7 +9371,9 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) (ADVERTISED_100baseT_Half | ADVERTISED_TP); } else { - BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", + BNX2X_ERR("NVRAM config error. 
" + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; @@ -9337,7 +9389,9 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) (ADVERTISED_1000baseT_Full | ADVERTISED_TP); } else { - BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; @@ -9353,7 +9407,9 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) (ADVERTISED_2500baseX_Full | ADVERTISED_TP); } else { - BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; @@ -9369,7 +9425,9 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); } else { - BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", + BNX2X_ERR("NVRAM config error. " + "Invalid link_config 0x%x" + " speed_cap_mask 0x%x\n", link_config, bp->link_params.speed_cap_mask[idx]); return; @@ -9380,7 +9438,8 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) break; default: - BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n", + BNX2X_ERR("NVRAM config error. " + "BAD link speed link_config 0x%x\n", link_config); bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG; @@ -9398,7 +9457,8 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) BNX2X_FLOW_CTRL_NONE; } - BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n", + BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl" + " 0x%x advertising 0x%x\n", bp->link_params.req_line_speed[idx], bp->link_params.req_duplex[idx], bp->link_params.req_flow_ctrl[idx], @@ -9447,7 +9507,8 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) bp->wol = (!(bp->flags & NO_WOL_FLAG) && (config & PORT_FEATURE_WOL_ENABLED)); - BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n", + BNX2X_DEV_INFO("lane_config 0x%08x " + "speed_cap_mask0 0x%08x link_config0 0x%08x\n", bp->link_params.lane_config, bp->link_params.speed_cap_mask[0], bp->port.link_config[0]); @@ -9489,7 +9550,6 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) void bnx2x_get_iscsi_info(struct bnx2x *bp) { - u32 no_flags = NO_ISCSI_FLAG; #ifdef BCM_CNIC int port = BP_PORT(bp); @@ -9509,28 +9569,12 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp) * disable the feature. 
*/ if (!bp->cnic_eth_dev.max_iscsi_conn) - bp->flags |= no_flags; + bp->flags |= NO_ISCSI_FLAG; #else - bp->flags |= no_flags; + bp->flags |= NO_ISCSI_FLAG; #endif } -#ifdef BCM_CNIC -static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) -{ - /* Port info */ - bp->cnic_eth_dev.fcoe_wwn_port_name_hi = - MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper); - bp->cnic_eth_dev.fcoe_wwn_port_name_lo = - MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower); - - /* Node info */ - bp->cnic_eth_dev.fcoe_wwn_node_name_hi = - MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper); - bp->cnic_eth_dev.fcoe_wwn_node_name_lo = - MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); -} -#endif static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp) { #ifdef BCM_CNIC @@ -9573,11 +9617,24 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp) * Read the WWN info only if the FCoE feature is enabled for * this function. */ - if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) - bnx2x_get_ext_wwn_info(bp, func); - - } else if (IS_MF_FCOE_SD(bp)) - bnx2x_get_ext_wwn_info(bp, func); + if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { + /* Port info */ + bp->cnic_eth_dev.fcoe_wwn_port_name_hi = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_port_name_upper); + bp->cnic_eth_dev.fcoe_wwn_port_name_lo = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_port_name_lower); + + /* Node info */ + bp->cnic_eth_dev.fcoe_wwn_node_name_hi = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_node_name_upper); + bp->cnic_eth_dev.fcoe_wwn_node_name_lo = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_node_name_lower); + } + } BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); @@ -9630,11 +9687,8 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) /* * iSCSI and FCoE NPAR MACs: if there is no either iSCSI or * FCoE MAC then the appropriate feature should be disabled. - * - * In non SD mode features configuration comes from - * struct func_ext_config. */ - if (!IS_MF_SD(bp)) { + if (IS_MF_SI(bp)) { u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { val2 = MF_CFG_RD(bp, func_ext_config[func]. @@ -9658,25 +9712,16 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) } else bp->flags |= NO_FCOE_FLAG; - } else { /* SD MODE */ - if (IS_MF_STORAGE_SD(bp)) { - if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { - /* use primary mac as iscsi mac */ - memcpy(iscsi_mac, bp->dev->dev_addr, - ETH_ALEN); - - BNX2X_DEV_INFO("SD ISCSI MODE\n"); - BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", - iscsi_mac); - } else { /* FCoE */ - memcpy(fip_mac, bp->dev->dev_addr, - ETH_ALEN); - BNX2X_DEV_INFO("SD FCoE MODE\n"); - BNX2X_DEV_INFO("Read FIP MAC: %pM\n", - fip_mac); - } + } else { /* SD mode */ + if (BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) { + /* use primary mac as iscsi mac */ + memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN); /* Zero primary MAC configuration */ memset(bp->dev->dev_addr, 0, ETH_ALEN); + + BNX2X_DEV_INFO("SD ISCSI MODE\n"); + BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", + iscsi_mac); } } #endif @@ -9705,6 +9750,10 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); #ifdef BCM_CNIC + /* Set the FCoE MAC in MF_SD mode */ + if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp)) + memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); + /* Disable iSCSI if MAC configuration is * invalid. 
*/ @@ -9724,11 +9773,10 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) dev_err(&bp->pdev->dev, - "bad Ethernet MAC address configuration: %pM\n" - "change it manually before bringing up the appropriate network interface\n", + "bad Ethernet MAC address configuration: " + "%pM, change it manually before bringing up " + "the appropriate network interface\n", bp->dev->dev_addr); - - } static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) @@ -9849,7 +9897,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) bp->mf_config[vn] = MF_CFG_RD(bp, func_mf_config[func].config); } else - BNX2X_DEV_INFO("illegal MAC address for SI\n"); + BNX2X_DEV_INFO("illegal MAC address " + "for SI\n"); break; case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: /* get OV configuration */ @@ -9867,7 +9916,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) default: /* Unknown configuration: reset mf_config */ bp->mf_config[vn] = 0; - BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val); + BNX2X_DEV_INFO("unkown MF mode 0x%x\n", val); } } @@ -9882,24 +9931,25 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) bp->mf_ov = val; bp->path_has_ovlan = true; - BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n", - func, bp->mf_ov, bp->mf_ov); + BNX2X_DEV_INFO("MF OV for func %d is %d " + "(0x%04x)\n", func, bp->mf_ov, + bp->mf_ov); } else { dev_err(&bp->pdev->dev, - "No valid MF OV for func %d, aborting\n", - func); + "No valid MF OV for func %d, " + "aborting\n", func); return -EPERM; } break; case MULTI_FUNCTION_SI: - BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", - func); + BNX2X_DEV_INFO("func %d is in MF " + "switch-independent mode\n", func); break; default: if (vn) { dev_err(&bp->pdev->dev, - "VN %d is in a single function mode, aborting\n", - vn); + "VN %d is in a single function mode, " + "aborting\n", vn); return -EPERM; } break; @@ -10107,14 +10157,15 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) dev_err(&bp->pdev->dev, "FPGA detected\n"); if (BP_NOMCP(bp) && (func == 0)) - dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); + dev_err(&bp->pdev->dev, "MCP disabled, " + "must load devices in order!\n"); bp->multi_mode = multi_mode; bp->disable_tpa = disable_tpa; #ifdef BCM_CNIC - bp->disable_tpa |= IS_MF_STORAGE_SD(bp); + bp->disable_tpa |= IS_MF_ISCSI_SD(bp); #endif /* Set TPA flags */ @@ -10231,8 +10282,10 @@ static int bnx2x_open(struct net_device *dev) bnx2x_set_power_state(bp, PCI_D3hot); bp->recovery_state = BNX2X_RECOVERY_FAILED; - BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n" - "If you still see this message after a few retries then power cycle is required.\n"); + netdev_err(bp->dev, "Recovery flow hasn't been properly" + " completed yet. Try again later. 
If u still see this" + " message after a few retries then power cycle is" + " required.\n"); return -EAGAIN; } while (0); @@ -10331,7 +10384,7 @@ static inline int bnx2x_set_uc_list(struct bnx2x *bp) static inline int bnx2x_set_mc_list(struct bnx2x *bp) { struct net_device *dev = bp->dev; - struct bnx2x_mcast_ramrod_params rparam = {NULL}; + struct bnx2x_mcast_ramrod_params rparam = {0}; int rc = 0; rparam.mcast_obj = &bp->mcast_obj; @@ -10339,7 +10392,8 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp) /* first, clear all configured multicast MACs */ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); if (rc < 0) { - BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc); + BNX2X_ERR("Failed to clear multicast " + "configuration: %d\n", rc); return rc; } @@ -10347,8 +10401,8 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp) if (netdev_mc_count(dev)) { rc = bnx2x_init_mcast_macs_list(bp, &rparam); if (rc) { - BNX2X_ERR("Failed to create multicast MACs list: %d\n", - rc); + BNX2X_ERR("Failed to create multicast MACs " + "list: %d\n", rc); return rc; } @@ -10356,8 +10410,8 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp) rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD); if (rc < 0) - BNX2X_ERR("Failed to set a new multicast configuration: %d\n", - rc); + BNX2X_ERR("Failed to set a new multicast " + "configuration: %d\n", rc); bnx2x_free_mcast_macs_list(&rparam); } @@ -10441,9 +10495,8 @@ static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad, struct bnx2x *bp = netdev_priv(netdev); int rc; - DP(NETIF_MSG_LINK, - "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n", - prtad, devad, addr, value); + DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x," + " value 0x%x\n", prtad, devad, addr, value); /* The HW expects different devad if CL22 is used */ devad = (devad == MDIO_DEVAD_NONE) ? 
DEFAULT_PHY_DEV_ADDR : devad; @@ -10484,10 +10537,8 @@ static int bnx2x_validate_addr(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); - if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) { - BNX2X_ERR("Non-valid Ethernet address\n"); + if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) return -EADDRNOTAVAIL; - } return 0; } @@ -10521,7 +10572,8 @@ static inline int bnx2x_set_coherency_mask(struct bnx2x *bp) if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { bp->flags |= USING_DAC_FLAG; if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { - dev_err(dev, "dma_set_coherent_mask failed, aborting\n"); + dev_err(dev, "dma_set_coherent_mask failed, " + "aborting\n"); return -EIO; } } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { @@ -10592,7 +10644,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, } if (!pci_is_pcie(pdev)) { - dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); + dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); rc = -EIO; goto err_out_release; } @@ -10628,7 +10680,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT); } - BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); + DP(BNX2X_MSG_SP, "me reg PF num: %d\n", bp->pf_num); bnx2x_set_power_state(bp, PCI_D0); @@ -10730,10 +10782,8 @@ static int bnx2x_check_firmware(struct bnx2x *bp) int i; const u8 *fw_ver; - if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) { - BNX2X_ERR("Wrong FW size\n"); + if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) return -EINVAL; - } fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data; sections = (struct bnx2x_fw_file_section *)fw_hdr; @@ -10744,7 +10794,8 @@ static int bnx2x_check_firmware(struct bnx2x *bp) offset = be32_to_cpu(sections[i].offset); len = be32_to_cpu(sections[i].len); if (offset + len > firmware->size) { - BNX2X_ERR("Section %d length is out of bounds\n", i); + dev_err(&bp->pdev->dev, + "Section %d length is out of bounds\n", i); return -EINVAL; } } @@ -10756,7 +10807,8 @@ static int bnx2x_check_firmware(struct bnx2x *bp) for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { if (be16_to_cpu(ops_offsets[i]) > num_ops) { - BNX2X_ERR("Section offset %d is out of bounds\n", i); + dev_err(&bp->pdev->dev, + "Section offset %d is out of bounds\n", i); return -EINVAL; } } @@ -10768,9 +10820,10 @@ static int bnx2x_check_firmware(struct bnx2x *bp) (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) || (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) { - BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n", - fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3], - BCM_5710_FW_MAJOR_VERSION, + dev_err(&bp->pdev->dev, + "Bad FW version:%d.%d.%d.%d. 
Should be %d.%d.%d.%d\n", + fw_ver[0], fw_ver[1], fw_ver[2], + fw_ver[3], BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_ENGINEERING_VERSION); @@ -10852,7 +10905,7 @@ do { \ (u8 *)bp->arr, len); \ } while (0) -static int bnx2x_init_firmware(struct bnx2x *bp) +int bnx2x_init_firmware(struct bnx2x *bp) { const char *fw_file_name; struct bnx2x_fw_file_hdr *fw_hdr; @@ -11089,7 +11142,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, bp = netdev_priv(dev); - BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", + DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n", tx_count, rx_count); bp->igu_sb_cnt = max_non_def_sbs; @@ -11102,7 +11155,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, return rc; } - BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs); + DP(NETIF_MSG_DRV, "max_non_def_sbs %d\n", max_non_def_sbs); rc = bnx2x_init_bp(bp); if (rc) @@ -11157,8 +11210,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); - BNX2X_DEV_INFO( - "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", + netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", board_info[ent->driver_data].name, (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), pcie_width, @@ -11377,7 +11429,8 @@ static void bnx2x_io_resume(struct pci_dev *pdev) struct bnx2x *bp = netdev_priv(dev); if (bp->recovery_state != BNX2X_RECOVERY_DONE) { - netdev_err(bp->dev, "Handling parity error recovery. Try again later\n"); + netdev_err(bp->dev, "Handling parity error recovery. " + "Try again later\n"); return; } @@ -11528,7 +11581,7 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) spe = bnx2x_sp_get_next(bp); *spe = *bp->cnic_kwq_cons; - DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n", + DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n", bp->cnic_spq_pending, bp->cnic_kwq_pending, count); if (bp->cnic_kwq_cons == bp->cnic_kwq_last) @@ -11547,15 +11600,14 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev, int i; #ifdef BNX2X_STOP_ON_ERROR - if (unlikely(bp->panic)) { - BNX2X_ERR("Can't post to SP queue while panic\n"); + if (unlikely(bp->panic)) return -EIO; - } #endif if ((bp->recovery_state != BNX2X_RECOVERY_DONE) && (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { - BNX2X_ERR("Handling parity error recovery. Try again later\n"); + netdev_err(dev, "Handling parity error recovery. 
Try again " + "later\n"); return -EAGAIN; } @@ -11571,7 +11623,7 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev, bp->cnic_kwq_pending++; - DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n", + DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n", spe->hdr.conn_and_cmd_data, spe->hdr.type, spe->data.update_data_addr.hi, spe->data.update_data_addr.lo, @@ -11852,10 +11904,8 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, struct bnx2x *bp = netdev_priv(dev); struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - if (ops == NULL) { - BNX2X_ERR("NULL ops received\n"); + if (ops == NULL) return -EINVAL; - } bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!bp->cnic_kwq) @@ -11938,8 +11988,8 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) if (NO_FCOE(bp)) cp->drv_state |= CNIC_DRV_STATE_NO_FCOE; - BNX2X_DEV_INFO( - "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n", + DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, " + "starting cid %d\n", cp->ctx_blk_size, cp->ctx_tbl_offset, cp->ctx_tbl_len, diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 3f52fadee3ed..484498f6bf1e 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -72,8 +72,8 @@ static inline void bnx2x_exe_queue_init(struct bnx2x *bp, o->execute = exec; o->get = get; - DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n", - exe_len); + DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk " + "length of %d\n", exe_len); } static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp, @@ -203,7 +203,8 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, */ if (!list_empty(&o->pending_comp)) { if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { - DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n"); + DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: " + "resetting pending_comp\n"); __bnx2x_exe_queue_reset_pending(bp, o); } else { spin_unlock_bh(&o->lock); @@ -475,14 +476,11 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, } /* check_add() callbacks */ -static int bnx2x_check_mac_add(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, +static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data) { struct bnx2x_vlan_mac_registry_elem *pos; - DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac); - if (!is_valid_ether_addr(data->mac.mac)) return -EINVAL; @@ -494,14 +492,11 @@ static int bnx2x_check_mac_add(struct bnx2x *bp, return 0; } -static int bnx2x_check_vlan_add(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, +static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data) { struct bnx2x_vlan_mac_registry_elem *pos; - DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan); - list_for_each_entry(pos, &o->head, link) if (data->vlan.vlan == pos->u.vlan.vlan) return -EEXIST; @@ -509,15 +504,11 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp, return 0; } -static int bnx2x_check_vlan_mac_add(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, +static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data) { struct bnx2x_vlan_mac_registry_elem *pos; - DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n", - data->vlan_mac.mac, data->vlan_mac.vlan); 
- list_for_each_entry(pos, &o->head, link) if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, @@ -530,14 +521,11 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp, /* check_del() callbacks */ static struct bnx2x_vlan_mac_registry_elem * - bnx2x_check_mac_del(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, + bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data) { struct bnx2x_vlan_mac_registry_elem *pos; - DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac); - list_for_each_entry(pos, &o->head, link) if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) return pos; @@ -546,14 +534,11 @@ static struct bnx2x_vlan_mac_registry_elem * } static struct bnx2x_vlan_mac_registry_elem * - bnx2x_check_vlan_del(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, + bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data) { struct bnx2x_vlan_mac_registry_elem *pos; - DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan); - list_for_each_entry(pos, &o->head, link) if (data->vlan.vlan == pos->u.vlan.vlan) return pos; @@ -562,15 +547,11 @@ static struct bnx2x_vlan_mac_registry_elem * } static struct bnx2x_vlan_mac_registry_elem * - bnx2x_check_vlan_mac_del(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, + bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data) { struct bnx2x_vlan_mac_registry_elem *pos; - DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n", - data->vlan_mac.mac, data->vlan_mac.vlan); - list_for_each_entry(pos, &o->head, link) if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, @@ -581,8 +562,7 @@ static struct bnx2x_vlan_mac_registry_elem * } /* check_move() callback */ -static bool bnx2x_check_move(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *src_o, +static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o, struct bnx2x_vlan_mac_obj *dst_o, union bnx2x_classification_ramrod_data *data) { @@ -592,10 +572,10 @@ static bool bnx2x_check_move(struct bnx2x *bp, /* Check if we can delete the requested configuration from the first * object. */ - pos = src_o->check_del(bp, src_o, data); + pos = src_o->check_del(src_o, data); /* check if configuration can be added */ - rc = dst_o->check_add(bp, dst_o, data); + rc = dst_o->check_add(dst_o, data); /* If this classification can not be added (is already set) * or can't be deleted - return an error. @@ -607,7 +587,6 @@ static bool bnx2x_check_move(struct bnx2x *bp, } static bool bnx2x_check_move_always_err( - struct bnx2x *bp, struct bnx2x_vlan_mac_obj *src_o, struct bnx2x_vlan_mac_obj *dst_o, union bnx2x_classification_ramrod_data *data) @@ -761,7 +740,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, &rule_entry->mac.header); DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n", - (add ? "add" : "delete"), mac, raw->cl_id); + add ? "add" : "delete", mac, raw->cl_id); /* Set a MAC itself */ bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, @@ -854,7 +833,7 @@ static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp, cfg_entry); DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n", - (add ? "setting" : "clearing"), + add ? 
"setting" : "clearing", mac, raw->cl_id, cam_offset); } @@ -1173,9 +1152,10 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, int rc; /* Check the registry */ - rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u); + rc = o->check_add(o, &elem->cmd_data.vlan_mac.u); if (rc) { - DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n"); + DP(BNX2X_MSG_SP, "ADD command is not allowed considering " + "current registry state\n"); return rc; } @@ -1226,9 +1206,10 @@ static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, /* If this classification can not be deleted (doesn't exist) * - return a BNX2X_EXIST. */ - pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); + pos = o->check_del(o, &elem->cmd_data.vlan_mac.u); if (!pos) { - DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n"); + DP(BNX2X_MSG_SP, "DEL command is not allowed considering " + "current registry state\n"); return -EEXIST; } @@ -1288,9 +1269,9 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, * Check if we can perform this operation based on the current registry * state. */ - if (!src_o->check_move(bp, src_o, dest_o, - &elem->cmd_data.vlan_mac.u)) { - DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n"); + if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) { + DP(BNX2X_MSG_SP, "MOVE command is not allowed considering " + "current registry state\n"); return -EINVAL; } @@ -1304,7 +1285,8 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, /* Check DEL on source */ query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; if (src_exeq->get(src_exeq, &query_elem)) { - BNX2X_ERR("There is a pending DEL command on the source queue already\n"); + BNX2X_ERR("There is a pending DEL command on the source " + "queue already\n"); return -EINVAL; } @@ -1317,7 +1299,8 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, /* Check ADD on destination */ query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; if (dest_exeq->get(dest_exeq, &query_elem)) { - BNX2X_ERR("There is a pending ADD command on the destination queue already\n"); + BNX2X_ERR("There is a pending ADD command on the " + "destination queue already\n"); return -EINVAL; } @@ -1492,10 +1475,12 @@ static int bnx2x_optimize_vlan_mac(struct bnx2x *bp, &pos->cmd_data.vlan_mac.vlan_mac_flags)) { if ((query.cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) { - BNX2X_ERR("Failed to return the credit for the optimized ADD command\n"); + BNX2X_ERR("Failed to return the credit for the " + "optimized ADD command\n"); return -EINVAL; } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ - BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n"); + BNX2X_ERR("Failed to recover the credit from " + "the optimized DEL command\n"); return -EINVAL; } } @@ -1561,7 +1546,7 @@ static inline int bnx2x_vlan_mac_get_registry_elem( reg_elem->vlan_mac_flags = elem->cmd_data.vlan_mac.vlan_mac_flags; } else /* DEL, RESTORE */ - reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); + reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u); *re = reg_elem; return 0; @@ -1659,8 +1644,7 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, cmd = elem->cmd_data.vlan_mac.cmd; if ((cmd == BNX2X_VLAN_MAC_DEL) || (cmd == BNX2X_VLAN_MAC_MOVE)) { - reg_elem = o->check_del(bp, o, - &elem->cmd_data.vlan_mac.u); + reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u); WARN_ON(!reg_elem); @@ -1691,7 +1675,7 @@ 
static int bnx2x_execute_vlan_mac(struct bnx2x *bp, if (!restore && ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) { - reg_elem = o->check_del(bp, cam_obj, + reg_elem = o->check_del(cam_obj, &elem->cmd_data.vlan_mac.u); if (reg_elem) { list_del(®_elem->link); @@ -1766,7 +1750,8 @@ int bnx2x_config_vlan_mac( rc = 1; if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { - DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n"); + DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: " + "clearing a pending bit.\n"); raw->clear_pending(raw); } @@ -2164,10 +2149,12 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, mac_filters->unmatched_unicast & ~mask; DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n" - "accp_mcast 0x%x\naccp_bcast 0x%x\n", - mac_filters->ucast_drop_all, mac_filters->mcast_drop_all, - mac_filters->ucast_accept_all, mac_filters->mcast_accept_all, - mac_filters->bcast_accept_all); + "accp_mcast 0x%x\naccp_bcast 0x%x\n", + mac_filters->ucast_drop_all, + mac_filters->mcast_drop_all, + mac_filters->ucast_accept_all, + mac_filters->mcast_accept_all, + mac_filters->bcast_accept_all); /* write the MAC filter structure*/ __storm_memset_mac_filters(bp, mac_filters, p->func_id); @@ -2316,7 +2303,8 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, */ bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); - DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n", + DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, " + "tx_accept_flags 0x%lx\n", data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags); @@ -2449,8 +2437,8 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, if (!new_cmd) return -ENOMEM; - DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n", - cmd, macs_list_len); + DP(BNX2X_MSG_SP, "About to enqueue a new %d command. " + "macs_list_len=%d\n", cmd, macs_list_len); INIT_LIST_HEAD(&new_cmd->data.macs_head); @@ -2665,7 +2653,7 @@ static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, cnt++; DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", - pmac_pos->mac); + pmac_pos->mac); list_del(&pmac_pos->link); @@ -3189,8 +3177,8 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, * matter. 
*/ if (p->mcast_list_len > o->max_cmd_len) { - BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n", - o->max_cmd_len); + BNX2X_ERR("Can't configure more than %d multicast MACs" + "on 57710\n", o->max_cmd_len); return -EINVAL; } /* Every configured MAC should be cleared if DEL command is @@ -3438,7 +3426,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, &data->config_table[i].lsb_mac_addr, elem->mac); DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n", - elem->mac); + elem->mac); list_add_tail(&elem->link, &o->registry.exact_match.macs); } @@ -3579,8 +3567,9 @@ int bnx2x_config_mcast(struct bnx2x *bp, if ((!p->mcast_list_len) && (!o->check_sched(o))) return 0; - DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n", - o->total_pending_num, p->mcast_list_len, o->max_cmd_len); + DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d " + "o->max_cmd_len=%d\n", o->total_pending_num, + p->mcast_list_len, o->max_cmd_len); /* Enqueue the current command to the pending list if we can't complete * it in the current iteration @@ -4305,8 +4294,9 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp, unsigned long cur_pending = o->pending; if (!test_and_clear_bit(cmd, &cur_pending)) { - BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n", - cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], + BNX2X_ERR("Bad MC reply %d for queue %d in state %d " + "pending 0x%lx, next_state %d\n", cmd, + o->cids[BNX2X_PRIMARY_CID_INDEX], o->state, cur_pending, o->next_state); return -EINVAL; } @@ -4318,13 +4308,13 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp, BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d", o->next_tx_only, o->max_cos); - DP(BNX2X_MSG_SP, - "Completing command %d for queue %d, setting state to %d\n", - cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state); + DP(BNX2X_MSG_SP, "Completing command %d for queue %d, " + "setting state to %d\n", cmd, + o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state); if (o->next_tx_only) /* print num tx-only if any exist */ DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n", - o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only); + o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only); o->state = o->next_state; o->num_tx_only = o->next_tx_only; @@ -4539,10 +4529,8 @@ static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp, &data->tx, &cmd_params->params.tx_only.flags); - DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x", - cmd_params->q_obj->cids[0], - data->tx.tx_bd_page_base.lo, - data->tx.tx_bd_page_base.hi); + DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",cmd_params->q_obj->cids[0], + data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi); } /** @@ -4689,8 +4677,10 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, /* Fill the ramrod data */ bnx2x_q_fill_setup_tx_only(bp, params, rdata); - DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n", - o->cids[cid_index], rdata->general.client_id, + DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d," + "sp-client id %d, cos %d\n", + o->cids[cid_index], + rdata->general.client_id, rdata->general.sp_client_id, rdata->general.cos); /* @@ -5232,9 +5222,9 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp, unsigned long cur_pending = o->pending; if (!test_and_clear_bit(cmd, &cur_pending)) { - BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n", - cmd, BP_FUNC(bp), o->state, - cur_pending, 
o->next_state); + BNX2X_ERR("Bad MC reply %d for func %d in state %d " + "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp), + o->state, cur_pending, o->next_state); return -EINVAL; } diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 61a7670adfcd..4ce351b4d517 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -315,8 +315,7 @@ struct bnx2x_vlan_mac_obj { * @return zero if the element may be added */ - int (*check_add)(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, + int (*check_add)(struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data); /** @@ -325,8 +324,7 @@ struct bnx2x_vlan_mac_obj { * @return true if the element may be deleted */ struct bnx2x_vlan_mac_registry_elem * - (*check_del)(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *o, + (*check_del)(struct bnx2x_vlan_mac_obj *o, union bnx2x_classification_ramrod_data *data); /** @@ -334,8 +332,7 @@ struct bnx2x_vlan_mac_obj { * * @return true if the element may be deleted */ - bool (*check_move)(struct bnx2x *bp, - struct bnx2x_vlan_mac_obj *src_o, + bool (*check_move)(struct bnx2x_vlan_mac_obj *src_o, struct bnx2x_vlan_mac_obj *dst_o, union bnx2x_classification_ramrod_data *data); diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index e1c9310fb07c..4cd4f127fe79 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -75,7 +75,7 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp) bp->fw_stats_req->hdr.drv_stats_counter = cpu_to_le16(bp->stats_counter++); - DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", + DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n", bp->fw_stats_req->hdr.drv_stats_counter); @@ -128,8 +128,6 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp) } else if (bp->func_stx) { *stats_comp = 0; - memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats, - sizeof(bp->func_stats)); bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); } } @@ -804,7 +802,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) &bp->fw_stats_data->port.tstorm_port_statistics; struct tstorm_per_pf_stats *tfunc = &bp->fw_stats_data->pf.tstorm_pf_statistics; - struct host_func_stats *fstats = &bp->func_stats; + struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); struct bnx2x_eth_stats *estats = &bp->eth_stats; struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old; struct stats_counter *counters = &bp->fw_stats_data->storm_counters; @@ -820,29 +818,29 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) /* are storm stats valid? 
*/ if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, - "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n", + DP(BNX2X_MSG_STATS, "stats not updated by xstorm" + " xstorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->xstats_counter), bp->stats_counter); return -EAGAIN; } if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, - "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n", + DP(BNX2X_MSG_STATS, "stats not updated by ustorm" + " ustorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->ustats_counter), bp->stats_counter); return -EAGAIN; } if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, - "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n", + DP(BNX2X_MSG_STATS, "stats not updated by cstorm" + " cstorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->cstats_counter), bp->stats_counter); return -EAGAIN; } if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, - "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n", + DP(BNX2X_MSG_STATS, "stats not updated by tstorm" + " tstorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->tstats_counter), bp->stats_counter); return -EAGAIN; } @@ -869,7 +867,8 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) u32 diff; - DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n", + DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, " + "bcast_sent 0x%x mcast_sent 0x%x\n", i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent, xclient->mcast_pkts_sent); @@ -1148,9 +1147,51 @@ static void bnx2x_stats_update(struct bnx2x *bp) if (netif_msg_timer(bp)) { struct bnx2x_eth_stats *estats = &bp->eth_stats; + int i, cos; netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n", estats->brb_drop_lo, estats->brb_truncate_lo); + + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + + pr_debug("%s: rx usage(%4u) *rx_cons_sb(%u) rx pkt(%lu) rx calls(%lu %lu)\n", + fp->name, (le16_to_cpu(*fp->rx_cons_sb) - + fp->rx_comp_cons), + le16_to_cpu(*fp->rx_cons_sb), + bnx2x_hilo(&qstats-> + total_unicast_packets_received_hi), + fp->rx_calls, fp->rx_pkt); + } + + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + struct bnx2x_fp_txdata *txdata; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + struct netdev_queue *txq; + + pr_debug("%s: tx pkt(%lu) (Xoff events %u)", + fp->name, + bnx2x_hilo( + &qstats->total_unicast_packets_transmitted_hi), + qstats->driver_xoff); + + for_each_cos_in_tx_queue(fp, cos) { + txdata = &fp->txdata[cos]; + txq = netdev_get_tx_queue(bp->dev, + FP_COS_TO_TXQ(fp, cos)); + + pr_debug("%d: tx avail(%4u) *tx_cons_sb(%u) tx calls (%lu) %s\n", + cos, + bnx2x_tx_avail(bp, txdata), + le16_to_cpu(*txdata->tx_cons_sb), + txdata->tx_pkt, + (netif_tx_queue_stopped(txq) ? 
+ "Xoff" : "Xon") + ); + } + } } bnx2x_hw_stats_post(bp); @@ -1309,6 +1350,36 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp) bnx2x_stats_comp(bp); } +static void bnx2x_func_stats_base_update(struct bnx2x *bp) +{ + struct dmae_command *dmae = &bp->stats_dmae; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!bp->func_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + memset(dmae, 0, sizeof(struct dmae_command)); + + dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, + true, DMAE_COMP_PCI); + dmae->src_addr_lo = bp->func_stx >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); +} + /** * This function will prepare the statistics ramrod data the way * we will only have to increment the statistics counter and @@ -1500,7 +1571,6 @@ void bnx2x_stats_init(struct bnx2x *bp) memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old)); memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old)); memset(&bp->eth_stats, 0, sizeof(bp->eth_stats)); - memset(&bp->func_stats, 0, sizeof(bp->func_stats)); /* Clean SP from previous statistics */ if (bp->func_stx) { @@ -1517,6 +1587,10 @@ void bnx2x_stats_init(struct bnx2x *bp) if (bp->port.pmf && bp->port.port_stx) bnx2x_port_stats_base_init(bp); + /* On a non-init, retrieve previous statistics from SP */ + if (!bp->stats_init && bp->func_stx) + bnx2x_func_stats_base_update(bp); + /* mark the end of statistics initializiation */ bp->stats_init = false; } diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 2b46e1eb7fd1..39ffd6dcdf1a 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -421,7 +421,12 @@ struct bnx2x_fw_port_stats_old { do { \ diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ old_uclient->s = uclient->s; \ - ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_E_USTAT(s, t) \ + do { \ + UPDATE_EXTEND_USTAT(s, t); \ + ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \ } while (0) #define UPDATE_EXTEND_E_USTAT(s, t) \ diff --git a/trunk/drivers/net/ethernet/emulex/benet/be.h b/trunk/drivers/net/ethernet/emulex/benet/be.h index 9576ac002c23..ab24e4600695 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be.h +++ b/trunk/drivers/net/ethernet/emulex/benet/be.h @@ -52,10 +52,6 @@ #define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */ #define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */ #define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */ -#define OC_SUBSYS_DEVICE_ID1 0xE602 -#define OC_SUBSYS_DEVICE_ID2 0xE642 -#define OC_SUBSYS_DEVICE_ID3 0xE612 -#define OC_SUBSYS_DEVICE_ID4 0xE652 static inline char *nic_name(struct pci_dev *pdev) { @@ -303,15 +299,12 @@ struct be_vf_cfg { unsigned char mac_addr[ETH_ALEN]; int if_handle; int pmac_id; - u16 def_vid; u16 vlan_tag; u32 tx_rate; }; #define BE_FLAGS_LINK_STATUS_INIT 1 #define BE_FLAGS_WORKER_SCHEDULED (1 << 3) -#define BE_UC_PMAC_COUNT 30 -#define BE_VF_UC_PMAC_COUNT 2 struct be_adapter { struct pci_dev *pdev; @@ 
-364,7 +357,7 @@ struct be_adapter { /* Ethtool knobs and info */ char fw_ver[FW_VER_LEN]; int if_handle; /* Used to configure filtering */ - u32 *pmac_id; /* MAC addr handle used by BE card */ + u32 pmac_id; /* MAC addr handle used by BE card */ u32 beacon_state; /* for set_phys_id */ bool eeh_err; @@ -372,6 +365,7 @@ struct be_adapter { bool fw_timeout; u32 port_num; bool promiscuous; + bool wol; u32 function_mode; u32 function_caps; u32 rx_fc; /* Rx flow control */ @@ -392,10 +386,6 @@ struct be_adapter { u32 sli_family; u8 hba_port_num; u16 pvid; - u8 wol_cap; - bool wol; - u32 max_pmac_cnt; /* Max secondary UC MACs programmable */ - u32 uc_macs; /* Count of secondary UC MAC programmed */ }; #define be_physfn(adapter) (!adapter->is_virtfn) @@ -559,28 +549,9 @@ static inline bool be_error(struct be_adapter *adapter) return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout; } -static inline bool be_is_wol_excluded(struct be_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - - if (!be_physfn(adapter)) - return true; - - switch (pdev->subsystem_device) { - case OC_SUBSYS_DEVICE_ID1: - case OC_SUBSYS_DEVICE_ID2: - case OC_SUBSYS_DEVICE_ID3: - case OC_SUBSYS_DEVICE_ID4: - return true; - default: - return false; - } -} - extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped); extern void be_link_status_update(struct be_adapter *adapter, u8 link_status); extern void be_parse_stats(struct be_adapter *adapter); extern int be_load_fw(struct be_adapter *adapter, u8 *func); -extern bool be_is_wol_supported(struct be_adapter *adapter); #endif /* BE_H */ diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c index 67b030d72df1..398fb5ca0fe2 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -2418,141 +2418,3 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, spin_unlock_bh(&adapter->mcc_lock); return status; } - -int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, - u32 domain, u16 intf_id) -{ - struct be_mcc_wrb *wrb; - struct be_cmd_req_set_hsw_config *req; - void *ctxt; - int status; - - spin_lock_bh(&adapter->mcc_lock); - - wrb = wrb_from_mccq(adapter); - if (!wrb) { - status = -EBUSY; - goto err; - } - - req = embedded_payload(wrb); - ctxt = &req->context; - - be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL); - - req->hdr.domain = domain; - AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); - if (pvid) { - AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); - AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); - } - - be_dws_cpu_to_le(req->context, sizeof(req->context)); - status = be_mcc_notify_wait(adapter); - -err: - spin_unlock_bh(&adapter->mcc_lock); - return status; -} - -/* Get Hyper switch config */ -int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, - u32 domain, u16 intf_id) -{ - struct be_mcc_wrb *wrb; - struct be_cmd_req_get_hsw_config *req; - void *ctxt; - int status; - u16 vid; - - spin_lock_bh(&adapter->mcc_lock); - - wrb = wrb_from_mccq(adapter); - if (!wrb) { - status = -EBUSY; - goto err; - } - - req = embedded_payload(wrb); - ctxt = &req->context; - - be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL); - - req->hdr.domain = domain; - AMAP_SET_BITS(struct 
amap_get_hsw_req_context, interface_id, ctxt, - intf_id); - AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); - be_dws_cpu_to_le(req->context, sizeof(req->context)); - - status = be_mcc_notify_wait(adapter); - if (!status) { - struct be_cmd_resp_get_hsw_config *resp = - embedded_payload(wrb); - be_dws_le_to_cpu(&resp->context, - sizeof(resp->context)); - vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, - pvid, &resp->context); - *pvid = le16_to_cpu(vid); - } - -err: - spin_unlock_bh(&adapter->mcc_lock); - return status; -} - -int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) -{ - struct be_mcc_wrb *wrb; - struct be_cmd_req_acpi_wol_magic_config_v1 *req; - int status; - int payload_len = sizeof(*req); - struct be_dma_mem cmd; - - memset(&cmd, 0, sizeof(struct be_dma_mem)); - cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, - &cmd.dma); - if (!cmd.va) { - dev_err(&adapter->pdev->dev, - "Memory allocation failure\n"); - return -ENOMEM; - } - - if (mutex_lock_interruptible(&adapter->mbox_lock)) - return -1; - - wrb = wrb_from_mbox(adapter); - if (!wrb) { - status = -EBUSY; - goto err; - } - - req = cmd.va; - - be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, - OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, - payload_len, wrb, &cmd); - - req->hdr.version = 1; - req->query_options = BE_GET_WOL_CAP; - - status = be_mbox_notify_wait(adapter); - if (!status) { - struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; - resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va; - - /* the command could succeed misleadingly on old f/w - * which is not aware of the V1 version. fake an error. */ - if (resp->hdr.response_length < payload_len) { - status = -1; - goto err; - } - adapter->wol_cap = resp->wol_settings; - } -err: - mutex_unlock(&adapter->mbox_lock); - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); - return status; -} diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.h b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.h index d5b680c56af0..687c42071411 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -191,8 +191,6 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 #define OPCODE_COMMON_GET_MAC_LIST 147 #define OPCODE_COMMON_SET_MAC_LIST 148 -#define OPCODE_COMMON_GET_HSW_CONFIG 152 -#define OPCODE_COMMON_SET_HSW_CONFIG 153 #define OPCODE_COMMON_READ_OBJECT 171 #define OPCODE_COMMON_WRITE_OBJECT 172 @@ -1208,33 +1206,6 @@ struct be_cmd_req_acpi_wol_magic_config{ u8 rsvd2[2]; } __packed; -struct be_cmd_req_acpi_wol_magic_config_v1 { - struct be_cmd_req_hdr hdr; - u8 rsvd0[2]; - u8 query_options; - u8 rsvd1[5]; - u32 rsvd2[288]; - u8 magic_mac[6]; - u8 rsvd3[22]; -} __packed; - -struct be_cmd_resp_acpi_wol_magic_config_v1 { - struct be_cmd_resp_hdr hdr; - u8 rsvd0[2]; - u8 wol_settings; - u8 rsvd1[5]; - u32 rsvd2[295]; -} __packed; - -#define BE_GET_WOL_CAP 2 - -#define BE_WOL_CAP 0x1 -#define BE_PME_D0_CAP 0x8 -#define BE_PME_D1_CAP 0x10 -#define BE_PME_D2_CAP 0x20 -#define BE_PME_D3HOT_CAP 0x40 -#define BE_PME_D3COLD_CAP 0x80 - /********************** LoopBack test *********************/ struct be_cmd_req_loopback_test { struct be_cmd_req_hdr hdr; @@ -1415,55 +1386,6 @@ struct be_cmd_req_set_mac_list { struct macaddr mac[BE_MAX_MAC]; } __packed; -/*********************** HSW Config ***********************/ -struct amap_set_hsw_context { - u8 interface_id[16]; - u8 
rsvd0[14]; - u8 pvid_valid; - u8 rsvd1; - u8 rsvd2[16]; - u8 pvid[16]; - u8 rsvd3[32]; - u8 rsvd4[32]; - u8 rsvd5[32]; -} __packed; - -struct be_cmd_req_set_hsw_config { - struct be_cmd_req_hdr hdr; - u8 context[sizeof(struct amap_set_hsw_context) / 8]; -} __packed; - -struct be_cmd_resp_set_hsw_config { - struct be_cmd_resp_hdr hdr; - u32 rsvd; -}; - -struct amap_get_hsw_req_context { - u8 interface_id[16]; - u8 rsvd0[14]; - u8 pvid_valid; - u8 pport; -} __packed; - -struct amap_get_hsw_resp_context { - u8 rsvd1[16]; - u8 pvid[16]; - u8 rsvd2[32]; - u8 rsvd3[32]; - u8 rsvd4[32]; -} __packed; - -struct be_cmd_req_get_hsw_config { - struct be_cmd_req_hdr hdr; - u8 context[sizeof(struct amap_get_hsw_req_context) / 8]; -} __packed; - -struct be_cmd_resp_get_hsw_config { - struct be_cmd_resp_hdr hdr; - u8 context[sizeof(struct amap_get_hsw_resp_context) / 8]; - u32 rsvd; -}; - /*************** HW Stats Get v1 **********************************/ #define BE_TXP_SW_SZ 48 struct be_port_rxf_stats_v1 { @@ -1668,9 +1590,4 @@ extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, bool *pmac_id_active, u32 *pmac_id, u8 *mac); extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, u32 domain); -extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, - u32 domain, u16 intf_id); -extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, - u32 domain, u16 intf_id); -extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter); diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_ethtool.c b/trunk/drivers/net/ethernet/emulex/benet/be_ethtool.c index c1ff73cb0e62..30ce17806916 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/trunk/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -600,16 +600,26 @@ be_set_phys_id(struct net_device *netdev, return 0; } +static bool +be_is_wol_supported(struct be_adapter *adapter) +{ + if (!be_physfn(adapter)) + return false; + else + return true; +} static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct be_adapter *adapter = netdev_priv(netdev); - if (be_is_wol_supported(adapter)) { - wol->supported |= WAKE_MAGIC; - wol->wolopts |= WAKE_MAGIC; - } else + if (be_is_wol_supported(adapter)) + wol->supported = WAKE_MAGIC; + + if (adapter->wol) + wol->wolopts = WAKE_MAGIC; + else wol->wolopts = 0; memset(&wol->sopass, 0, sizeof(wol->sopass)); } @@ -620,14 +630,9 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) struct be_adapter *adapter = netdev_priv(netdev); if (wol->wolopts & ~WAKE_MAGIC) - return -EOPNOTSUPP; - - if (!be_is_wol_supported(adapter)) { - dev_warn(&adapter->pdev->dev, "WOL not supported\n"); - return -EOPNOTSUPP; - } + return -EINVAL; - if (wol->wolopts & WAKE_MAGIC) + if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter)) adapter->wol = true; else adapter->wol = false; diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_main.c b/trunk/drivers/net/ethernet/emulex/benet/be_main.c index 528a886bc2cd..e3822788f532 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_main.c +++ b/trunk/drivers/net/ethernet/emulex/benet/be_main.c @@ -235,7 +235,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) struct sockaddr *addr = p; int status = 0; u8 current_mac[ETH_ALEN]; - u32 pmac_id = adapter->pmac_id[0]; + u32 pmac_id = adapter->pmac_id; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; @@ -248,7 +248,7 @@ static int be_mac_addr_set(struct net_device *netdev, void 
*p) if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) { status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, - adapter->if_handle, &adapter->pmac_id[0], 0); + adapter->if_handle, &adapter->pmac_id, 0); if (status) goto err; @@ -885,29 +885,6 @@ static void be_set_rx_mode(struct net_device *netdev) goto done; } - if (netdev_uc_count(netdev) != adapter->uc_macs) { - struct netdev_hw_addr *ha; - int i = 1; /* First slot is claimed by the Primary MAC */ - - for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) { - be_cmd_pmac_del(adapter, adapter->if_handle, - adapter->pmac_id[i], 0); - } - - if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) { - be_cmd_rx_filter(adapter, IFF_PROMISC, ON); - adapter->promiscuous = true; - goto done; - } - - netdev_for_each_uc_addr(ha, adapter->netdev) { - adapter->uc_macs++; /* First slot is for Primary MAC */ - be_cmd_pmac_add(adapter, (u8 *)ha->addr, - adapter->if_handle, - &adapter->pmac_id[adapter->uc_macs], 0); - } - } - be_cmd_rx_filter(adapter, IFF_MULTICAST, ON); done: return; @@ -978,21 +955,14 @@ static int be_set_vf_vlan(struct net_device *netdev, return -EINVAL; if (vlan) { - if (adapter->vf_cfg[vf].vlan_tag != vlan) { - /* If this is new value, program it. Else skip. */ - adapter->vf_cfg[vf].vlan_tag = vlan; - - status = be_cmd_set_hsw_config(adapter, vlan, - vf + 1, adapter->vf_cfg[vf].if_handle); - } + adapter->vf_cfg[vf].vlan_tag = vlan; + adapter->vlans_added++; } else { - /* Reset Transparent Vlan Tagging. */ adapter->vf_cfg[vf].vlan_tag = 0; - vlan = adapter->vf_cfg[vf].def_vid; - status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, - adapter->vf_cfg[vf].if_handle); + adapter->vlans_added--; } + status = be_vid_config(adapter, true, vf); if (status) dev_info(&adapter->pdev->dev, @@ -2488,8 +2458,6 @@ static void be_vf_clear(struct be_adapter *adapter) static int be_clear(struct be_adapter *adapter) { - int i = 1; - if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) { cancel_delayed_work_sync(&adapter->work); adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED; @@ -2498,10 +2466,6 @@ static int be_clear(struct be_adapter *adapter) if (sriov_enabled(adapter)) be_vf_clear(adapter); - for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) - be_cmd_pmac_del(adapter, adapter->if_handle, - adapter->pmac_id[i], 0); - be_cmd_if_destroy(adapter, adapter->if_handle, 0); be_mcc_queues_destroy(adapter); @@ -2513,7 +2477,6 @@ static int be_clear(struct be_adapter *adapter) be_cmd_fw_clean(adapter); be_msix_disable(adapter); - kfree(adapter->pmac_id); return 0; } @@ -2532,7 +2495,7 @@ static int be_vf_setup(struct be_adapter *adapter) { struct be_vf_cfg *vf_cfg; u32 cap_flags, en_flags, vf; - u16 def_vlan, lnk_speed; + u16 lnk_speed; int status; be_vf_setup_init(adapter); @@ -2556,12 +2519,6 @@ static int be_vf_setup(struct be_adapter *adapter) if (status) goto err; vf_cfg->tx_rate = lnk_speed * 10; - - status = be_cmd_get_hsw_config(adapter, &def_vlan, - vf + 1, vf_cfg->if_handle); - if (status) - goto err; - vf_cfg->def_vid = def_vlan; } return 0; err: @@ -2595,10 +2552,10 @@ static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac) false, adapter->if_handle, pmac_id); if (!status) - adapter->pmac_id[0] = pmac_id; + adapter->pmac_id = pmac_id; } else { status = be_cmd_pmac_add(adapter, mac, - adapter->if_handle, &adapter->pmac_id[0], 0); + adapter->if_handle, &adapter->pmac_id, 0); } do_none: return status; @@ -2653,7 +2610,7 @@ static int be_setup(struct be_adapter *adapter) } status = be_cmd_if_create(adapter, cap_flags, en_flags, 
netdev->dev_addr, &adapter->if_handle, - &adapter->pmac_id[0], 0); + &adapter->pmac_id, 0); if (status != 0) goto err; @@ -3102,8 +3059,6 @@ static void be_netdev_init(struct net_device *netdev) netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->flags |= IFF_MULTICAST; netif_set_gso_max_size(netdev, 65535); @@ -3289,12 +3244,6 @@ static void __devexit be_remove(struct pci_dev *pdev) free_netdev(adapter->netdev); } -bool be_is_wol_supported(struct be_adapter *adapter) -{ - return ((adapter->wol_cap & BE_WOL_CAP) && - !be_is_wol_excluded(adapter)) ? true : false; -} - static int be_get_config(struct be_adapter *adapter) { int status; @@ -3305,36 +3254,14 @@ static int be_get_config(struct be_adapter *adapter) return status; if (adapter->function_mode & FLEX10_MODE) - adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8; + adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4; else adapter->max_vlans = BE_NUM_VLANS_SUPPORTED; - if (be_physfn(adapter)) - adapter->max_pmac_cnt = BE_UC_PMAC_COUNT; - else - adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT; - - /* primary mac needs 1 pmac entry */ - adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1, - sizeof(u32), GFP_KERNEL); - if (!adapter->pmac_id) - return -ENOMEM; - status = be_cmd_get_cntl_attributes(adapter); if (status) return status; - status = be_cmd_get_acpi_wol_cap(adapter); - if (status) { - /* in case of a failure to get wol capabillities - * check the exclusion list to determine WOL capability */ - if (!be_is_wol_excluded(adapter)) - adapter->wol_cap |= BE_WOL_CAP; - } - - if (be_is_wol_supported(adapter)) - adapter->wol = true; - return 0; } diff --git a/trunk/drivers/net/ethernet/freescale/gianfar.c b/trunk/drivers/net/ethernet/freescale/gianfar.c index d9428f0e738a..adb0ae4e4195 100644 --- a/trunk/drivers/net/ethernet/freescale/gianfar.c +++ b/trunk/drivers/net/ethernet/freescale/gianfar.c @@ -104,7 +104,10 @@ #include "fsl_pq_mdio.h" #define TX_TIMEOUT (1*HZ) +#undef BRIEF_GFAR_ERRORS +#undef VERBOSE_GFAR_ERRORS +const char gfar_driver_name[] = "Gianfar Ethernet"; const char gfar_driver_version[] = "1.3"; static int gfar_enet_open(struct net_device *dev); @@ -1752,12 +1755,9 @@ static void free_skb_resources(struct gfar_private *priv) /* Go through all the buffer descriptors and free their data buffers */ for (i = 0; i < priv->num_tx_queues; i++) { - struct netdev_queue *txq; tx_queue = priv->tx_queue[i]; - txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); if(tx_queue->tx_skbuff) free_skb_tx_queue(tx_queue); - netdev_tx_reset_queue(txq); } for (i = 0; i < priv->num_rx_queues; i++) { @@ -2217,8 +2217,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); } - netdev_tx_sent_queue(txq, skb->len); - /* * We can work in parallel with gfar_clean_tx_ring(), except * when modifying num_txbdfree. 
Note that we didn't grab the lock @@ -2462,7 +2460,6 @@ static void gfar_align_skb(struct sk_buff *skb) static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) { struct net_device *dev = tx_queue->dev; - struct netdev_queue *txq; struct gfar_private *priv = netdev_priv(dev); struct gfar_priv_rx_q *rx_queue = NULL; struct txbd8 *bdp, *next = NULL; @@ -2474,13 +2471,10 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) int frags = 0, nr_txbds = 0; int i; int howmany = 0; - int tqi = tx_queue->qindex; - unsigned int bytes_sent = 0; u32 lstatus; size_t buflen; - rx_queue = priv->rx_queue[tqi]; - txq = netdev_get_tx_queue(dev, tqi); + rx_queue = priv->rx_queue[tx_queue->qindex]; bdp = tx_queue->dirty_tx; skb_dirtytx = tx_queue->skb_dirtytx; @@ -2539,8 +2533,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) bdp = next_txbd(bdp, base, tx_ring_size); } - bytes_sent += skb->len; - /* * If there's room in the queue (limit it to rx_buffer_size) * we add this skb back into the pool, if it's the right size @@ -2565,15 +2557,13 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) } /* If we freed a buffer, we can restart transmission, if necessary */ - if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree) - netif_wake_subqueue(dev, tqi); + if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree) + netif_wake_subqueue(dev, tx_queue->qindex); /* Update dirty indicators */ tx_queue->skb_dirtytx = skb_dirtytx; tx_queue->dirty_tx = bdp; - netdev_tx_completed_queue(txq, howmany, bytes_sent); - return howmany; } diff --git a/trunk/drivers/net/ethernet/freescale/gianfar.h b/trunk/drivers/net/ethernet/freescale/gianfar.h index fc2488adca36..4fe0f342acec 100644 --- a/trunk/drivers/net/ethernet/freescale/gianfar.h +++ b/trunk/drivers/net/ethernet/freescale/gianfar.h @@ -78,8 +78,11 @@ struct ethtool_rx_list { #define INCREMENTAL_BUFFER_SIZE 512 #define PHY_INIT_TIMEOUT 100000 +#define GFAR_PHY_CHANGE_TIME 2 +#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.2, " #define DRV_NAME "gfar-enet" +extern const char gfar_driver_name[]; extern const char gfar_driver_version[]; /* MAXIMUM NUMBER OF QUEUES SUPPORTED */ diff --git a/trunk/drivers/net/ethernet/freescale/gianfar_ethtool.c b/trunk/drivers/net/ethernet/freescale/gianfar_ethtool.c index 8d74efd04bb9..5a78d55f46e7 100644 --- a/trunk/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/trunk/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -58,7 +58,7 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals); static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo); -static const char stat_gstrings[][ETH_GSTRING_LEN] = { +static char stat_gstrings[][ETH_GSTRING_LEN] = { "rx-dropped-by-kernel", "rx-large-frame-errors", "rx-short-frame-errors", diff --git a/trunk/drivers/net/ethernet/intel/igbvf/defines.h b/trunk/drivers/net/ethernet/intel/igbvf/defines.h index 3e18045d8f89..33f40d3474ae 100644 --- a/trunk/drivers/net/ethernet/intel/igbvf/defines.h +++ b/trunk/drivers/net/ethernet/intel/igbvf/defines.h @@ -97,6 +97,10 @@ #define E1000_ERR_MAC_INIT 5 #define E1000_ERR_MBX 15 +#ifndef ETH_ADDR_LEN +#define ETH_ADDR_LEN 6 +#endif + /* SRRCTL bit definitions */ #define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ #define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 diff --git a/trunk/drivers/net/ethernet/intel/igbvf/vf.c 
b/trunk/drivers/net/ethernet/intel/igbvf/vf.c index 30a6cc426037..19551977b352 100644 --- a/trunk/drivers/net/ethernet/intel/igbvf/vf.c +++ b/trunk/drivers/net/ethernet/intel/igbvf/vf.c @@ -246,7 +246,7 @@ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, for (i = 0; i < cnt; i++) { hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list); hash_list[i] = hash_value & 0x0FFFF; - mc_addr_list += ETH_ALEN; + mc_addr_list += ETH_ADDR_LEN; } mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE); @@ -333,7 +333,10 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) **/ static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw) { - memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN); + int i; + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; return E1000_SUCCESS; } diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe.h index e0d809d0ed75..468cb9048dc8 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -152,6 +152,7 @@ struct ixgbe_tx_buffer { struct sk_buff *skb; unsigned int bytecount; unsigned short gso_segs; + __be16 protocol; DEFINE_DMA_UNMAP_ADDR(dma); DEFINE_DMA_UNMAP_LEN(len); u32 tx_flags; @@ -632,7 +633,7 @@ extern void ixgbe_do_reset(struct net_device *netdev); extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, - u32 tx_flags, u8 *hdr_len); + u8 *hdr_len); extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter); extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, union ixgbe_adv_rx_desc *rx_desc, diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 5f943d3f85c4..77ea4b716535 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -448,16 +448,15 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) * @tx_ring: tx desc ring * @first: first tx_buffer structure containing skb, tx_flags, and protocol - * @tx_flags: tx flags * @hdr_len: hdr_len to be returned * * This sets up large send offload for FCoE * - * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error + * Returns : 0 indicates success, < 0 for error */ int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, - u32 tx_flags, u8 *hdr_len) + u8 *hdr_len) { struct sk_buff *skb = first->skb; struct fc_frame_header *fh; @@ -539,8 +538,12 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, skb_shinfo(skb)->gso_size); first->bytecount += (first->gso_segs - 1) * *hdr_len; + first->tx_flags |= IXGBE_TX_FLAGS_FSO; } + /* set flag indicating FCOE to ixgbe_tx_map call */ + first->tx_flags |= IXGBE_TX_FLAGS_FCOE; + /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */ mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; @@ -550,13 +553,13 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, sizeof(struct fc_frame_header); vlan_macip_lens |= (skb_transport_offset(skb) - 4) << IXGBE_ADVTXD_MACLEN_SHIFT; - vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; /* write context desc */ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx); - return 
skb_is_gso(skb); + return 0; } static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe) diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 1d8f9f83f8ed..7817f0473032 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6585,10 +6585,9 @@ void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, - u32 tx_flags, __be16 protocol, u8 *hdr_len) + u8 *hdr_len) { struct sk_buff *skb = first->skb; - int err; u32 vlan_macip_lens, type_tucmd; u32 mss_l4len_idx, l4len; @@ -6596,7 +6595,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, return 0; if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (err) return err; } @@ -6604,7 +6603,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - if (protocol == __constant_htons(ETH_P_IP)) { + if (first->protocol == __constant_htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; @@ -6613,12 +6612,17 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, IPPROTO_TCP, 0); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM | + IXGBE_TX_FLAGS_IPV4; } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM; } /* compute header lengths */ @@ -6637,17 +6641,16 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = skb_network_header_len(skb); vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; - vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, - mss_l4len_idx); + mss_l4len_idx); return 1; } -static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *first, - u32 tx_flags, __be16 protocol) +static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; @@ -6655,12 +6658,12 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { - if (!(tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && - !(tx_flags & IXGBE_TX_FLAGS_TXSW)) - return false; + if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) + return; } else { u8 l4_hdr = 0; - switch (protocol) { + switch (first->protocol) { case __constant_htons(ETH_P_IP): vlan_macip_lens |= skb_network_header_len(skb); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; @@ -6674,7 +6677,7 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, "partial checksum but proto=%x!\n", - skb->protocol); + first->protocol); } break; } @@ -6698,19 +6701,21 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, "partial checksum but l4 proto=%x!\n", - skb->protocol); + l4_hdr); } break; } + + /* update TX checksum flag */ + first->tx_flags |= IXGBE_TX_FLAGS_CSUM; } + /* 
vlan_macip_lens: MACLEN, VLAN tag */ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; - vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, mss_l4len_idx); - - return (skb->ip_summed == CHECKSUM_PARTIAL); } static __le32 ixgbe_tx_cmd_type(u32 tx_flags) @@ -6775,7 +6780,6 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, - u32 tx_flags, const u8 hdr_len) { dma_addr_t dma; @@ -6786,6 +6790,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); unsigned int paylen = skb->len - hdr_len; + u32 tx_flags = first->tx_flags; __le32 cmd_type; u16 i = tx_ring->next_to_use; @@ -6812,7 +6817,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, /* record length, and DMA address */ dma_unmap_len_set(first, len, size); dma_unmap_addr_set(first, dma, dma); - first->tx_flags = tx_flags; tx_desc->read.buffer_addr = cpu_to_le64(dma); @@ -6921,8 +6925,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, } static void ixgbe_atr(struct ixgbe_ring *ring, - struct ixgbe_tx_buffer *first, - u32 tx_flags, __be16 protocol) + struct ixgbe_tx_buffer *first) { struct ixgbe_q_vector *q_vector = ring->q_vector; union ixgbe_atr_hash_dword input = { .dword = 0 }; @@ -6949,9 +6952,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring, hdr.network = skb_network_header(first->skb); /* Currently only IPv4/IPv6 with TCP is supported */ - if ((protocol != __constant_htons(ETH_P_IPV6) || + if ((first->protocol != __constant_htons(ETH_P_IPV6) || hdr.ipv6->nexthdr != IPPROTO_TCP) && - (protocol != __constant_htons(ETH_P_IP) || + (first->protocol != __constant_htons(ETH_P_IP) || hdr.ipv4->protocol != IPPROTO_TCP)) return; @@ -6968,7 +6971,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, /* reset sample count */ ring->atr_count = 0; - vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); + vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); /* * src and dst are inverted, think how the receiver sees them @@ -6983,13 +6986,13 @@ static void ixgbe_atr(struct ixgbe_ring *ring, * since src port and flex bytes occupy the same word XOR them together * and write the value to source port portion of compressed dword */ - if (tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) + if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q); else - common.port.src ^= th->dest ^ protocol; + common.port.src ^= th->dest ^ first->protocol; common.port.dst ^= th->source; - if (protocol == __constant_htons(ETH_P_IP)) { + if (first->protocol == __constant_htons(ETH_P_IP)) { input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; } else { @@ -7145,43 +7148,36 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, } } + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + #ifdef IXGBE_FCOE /* setup tx offload for FCoE */ if ((protocol == __constant_htons(ETH_P_FCOE)) && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { - tso = ixgbe_fso(tx_ring, first, tx_flags, &hdr_len); + tso = ixgbe_fso(tx_ring, first, &hdr_len); if (tso < 0) goto out_drop; - else if (tso) - tx_flags |= IXGBE_TX_FLAGS_FSO | - IXGBE_TX_FLAGS_FCOE; - else - 
tx_flags |= IXGBE_TX_FLAGS_FCOE; goto xmit_fcoe; } #endif /* IXGBE_FCOE */ - /* setup IPv4/IPv6 offloads */ - if (protocol == __constant_htons(ETH_P_IP)) - tx_flags |= IXGBE_TX_FLAGS_IPV4; - - tso = ixgbe_tso(tx_ring, first, tx_flags, protocol, &hdr_len); + tso = ixgbe_tso(tx_ring, first, &hdr_len); if (tso < 0) goto out_drop; - else if (tso) - tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; - else if (ixgbe_tx_csum(tx_ring, first, tx_flags, protocol)) - tx_flags |= IXGBE_TX_FLAGS_CSUM; + else if (!tso) + ixgbe_tx_csum(tx_ring, first); /* add the ATR filter if ATR is on */ if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) - ixgbe_atr(tx_ring, first, tx_flags, protocol); + ixgbe_atr(tx_ring, first); #ifdef IXGBE_FCOE xmit_fcoe: #endif /* IXGBE_FCOE */ - ixgbe_tx_map(tx_ring, first, tx_flags, hdr_len); + ixgbe_tx_map(tx_ring, first, hdr_len); ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h index e4d738f6166d..43806d9d1e13 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h @@ -1190,6 +1190,7 @@ #define USB_AGG_EN BIT(3) +#define MAC_ADDR_LEN 6 #define LAST_ENTRY_OF_TX_PKT_BUFFER 255 #define POLLING_LLT_THRESHOLD 20 diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192de/reg.h b/trunk/drivers/net/wireless/rtlwifi/rtl8192de/reg.h index ebb1d5f5e7b5..9bc462331078 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192de/reg.h +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192de/reg.h @@ -998,6 +998,7 @@ #define SCR_RXBCUSEDK BIT(7) /* General definitions */ +#define MAC_ADDR_LEN 6 #define LAST_ENTRY_OF_TX_PKT_BUFFER 255 #define LAST_ENTRY_OF_TX_PKT_BUFFER_DUAL_MAC 127 diff --git a/trunk/include/linux/if_vlan.h b/trunk/include/linux/if_vlan.h index 33a6e1951d4d..13aff1e2183b 100644 --- a/trunk/include/linux/if_vlan.h +++ b/trunk/include/linux/if_vlan.h @@ -18,9 +18,10 @@ #include #include -#define VLAN_HLEN 4 /* The additional bytes required by VLAN - * (in addition to the Ethernet header) +#define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header) + * that VLAN requires. */ +#define VLAN_ETH_ALEN 6 /* Octets in one ethernet addr */ #define VLAN_ETH_HLEN 18 /* Total octets in header. */ #define VLAN_ETH_ZLEN 64 /* Min. octets in frame sans FCS */ @@ -176,7 +177,7 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci) veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); /* Move the mac addresses to the beginning of the new header. 
*/
-	memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
+	memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN);
 	skb->mac_header -= VLAN_HLEN;
 
 	/* first, the ethernet type */
diff --git a/trunk/include/linux/snmp.h b/trunk/include/linux/snmp.h
index 2e68f5ba0389..8ee8af4e6da9 100644
--- a/trunk/include/linux/snmp.h
+++ b/trunk/include/linux/snmp.h
@@ -233,7 +233,6 @@ enum
 	LINUX_MIB_TCPREQQFULLDOCOOKIES,		/* TCPReqQFullDoCookies */
 	LINUX_MIB_TCPREQQFULLDROP,		/* TCPReqQFullDrop */
 	LINUX_MIB_TCPRETRANSFAIL,		/* TCPRetransFail */
-	LINUX_MIB_TCPRCVCOALESCE,		/* TCPRcvCoalesce */
 	__LINUX_MIB_MAX
 };
 
diff --git a/trunk/net/ipv4/proc.c b/trunk/net/ipv4/proc.c
index 8af0d44e4e22..02d61079f08b 100644
--- a/trunk/net/ipv4/proc.c
+++ b/trunk/net/ipv4/proc.c
@@ -257,7 +257,6 @@ static const struct snmp_mib snmp4_net_list[] = {
 	SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
 	SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
 	SNMP_MIB_ITEM("TCPRetransFail", LINUX_MIB_TCPRETRANSFAIL),
-	SNMP_MIB_ITEM("TCPRcvCoalesce", LINUX_MIB_TCPRCVCOALESCE),
 	SNMP_MIB_SENTINEL
 };
 
diff --git a/trunk/net/ipv4/tcp_input.c b/trunk/net/ipv4/tcp_input.c
index e886e2f7fa8d..68d4057cba00 100644
--- a/trunk/net/ipv4/tcp_input.c
+++ b/trunk/net/ipv4/tcp_input.c
@@ -4446,137 +4446,6 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
 	return 0;
 }
 
-static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct sk_buff *skb1;
-	u32 seq, end_seq;
-
-	TCP_ECN_check_ce(tp, skb);
-
-	if (tcp_try_rmem_schedule(sk, skb->truesize)) {
-		/* TODO: should increment a counter */
-		__kfree_skb(skb);
-		return;
-	}
-
-	/* Disable header prediction. */
-	tp->pred_flags = 0;
-	inet_csk_schedule_ack(sk);
-
-	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
-		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
-
-	skb1 = skb_peek_tail(&tp->out_of_order_queue);
-	if (!skb1) {
-		/* Initial out of order segment, build 1 SACK. */
-		if (tcp_is_sack(tp)) {
-			tp->rx_opt.num_sacks = 1;
-			tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
-			tp->selective_acks[0].end_seq =
-						TCP_SKB_CB(skb)->end_seq;
-		}
-		__skb_queue_head(&tp->out_of_order_queue, skb);
-		goto end;
-	}
-
-	seq = TCP_SKB_CB(skb)->seq;
-	end_seq = TCP_SKB_CB(skb)->end_seq;
-
-	if (seq == TCP_SKB_CB(skb1)->end_seq) {
-		/* Packets in ofo can stay in queue a long time.
-		 * Better try to coalesce them right now
-		 * to avoid future tcp_collapse_ofo_queue(),
-		 * probably the most expensive function in tcp stack.
-		 */
-		if (skb->len <= skb_tailroom(skb1) && !tcp_hdr(skb)->fin) {
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPRCVCOALESCE);
-			BUG_ON(skb_copy_bits(skb, 0,
-					     skb_put(skb1, skb->len),
-					     skb->len));
-			TCP_SKB_CB(skb1)->end_seq = end_seq;
-			TCP_SKB_CB(skb1)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
-			__kfree_skb(skb);
-			skb = NULL;
-		} else {
-			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
-		}
-
-		if (!tp->rx_opt.num_sacks ||
-		    tp->selective_acks[0].end_seq != seq)
-			goto add_sack;
-
-		/* Common case: data arrive in order after hole. */
-		tp->selective_acks[0].end_seq = end_seq;
-		goto end;
-	}
-
-	/* Find place to insert this segment. */
-	while (1) {
-		if (!after(TCP_SKB_CB(skb1)->seq, seq))
-			break;
-		if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
-			skb1 = NULL;
-			break;
-		}
-		skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
-	}
-
-	/* Do skb overlap to previous one? */
-	if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
-		if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
-			/* All the bits are present. Drop. */
-			__kfree_skb(skb);
-			skb = NULL;
-			tcp_dsack_set(sk, seq, end_seq);
-			goto add_sack;
-		}
-		if (after(seq, TCP_SKB_CB(skb1)->seq)) {
-			/* Partial overlap. */
-			tcp_dsack_set(sk, seq,
-				      TCP_SKB_CB(skb1)->end_seq);
-		} else {
-			if (skb_queue_is_first(&tp->out_of_order_queue,
-					       skb1))
-				skb1 = NULL;
-			else
-				skb1 = skb_queue_prev(
-					&tp->out_of_order_queue,
-					skb1);
-		}
-	}
-	if (!skb1)
-		__skb_queue_head(&tp->out_of_order_queue, skb);
-	else
-		__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
-
-	/* And clean segments covered by new one as whole. */
-	while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
-		skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
-
-		if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
-			break;
-		if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
-			tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
-					 end_seq);
-			break;
-		}
-		__skb_unlink(skb1, &tp->out_of_order_queue);
-		tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
-				 TCP_SKB_CB(skb1)->end_seq);
-		__kfree_skb(skb1);
-	}
-
-add_sack:
-	if (tcp_is_sack(tp))
-		tcp_sack_new_ofo_skb(sk, seq, end_seq);
-end:
-	if (skb)
-		skb_set_owner_r(skb, sk);
-}
-
-
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
 	const struct tcphdr *th = tcp_hdr(skb);
@@ -4692,7 +4561,105 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 		goto queue_and_out;
 	}
 
-	tcp_data_queue_ofo(sk, skb);
+	TCP_ECN_check_ce(tp, skb);
+
+	if (tcp_try_rmem_schedule(sk, skb->truesize))
+		goto drop;
+
+	/* Disable header prediction. */
+	tp->pred_flags = 0;
+	inet_csk_schedule_ack(sk);
+
+	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
+		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
+
+	skb_set_owner_r(skb, sk);
+
+	if (!skb_peek(&tp->out_of_order_queue)) {
+		/* Initial out of order segment, build 1 SACK. */
+		if (tcp_is_sack(tp)) {
+			tp->rx_opt.num_sacks = 1;
+			tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
+			tp->selective_acks[0].end_seq =
+						TCP_SKB_CB(skb)->end_seq;
+		}
+		__skb_queue_head(&tp->out_of_order_queue, skb);
+	} else {
+		struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue);
+		u32 seq = TCP_SKB_CB(skb)->seq;
+		u32 end_seq = TCP_SKB_CB(skb)->end_seq;
+
+		if (seq == TCP_SKB_CB(skb1)->end_seq) {
+			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
+
+			if (!tp->rx_opt.num_sacks ||
+			    tp->selective_acks[0].end_seq != seq)
+				goto add_sack;
+
+			/* Common case: data arrive in order after hole. */
+			tp->selective_acks[0].end_seq = end_seq;
+			return;
+		}
+
+		/* Find place to insert this segment. */
+		while (1) {
+			if (!after(TCP_SKB_CB(skb1)->seq, seq))
+				break;
+			if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
+				skb1 = NULL;
+				break;
+			}
+			skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
+		}
+
+		/* Do skb overlap to previous one? */
+		if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
+			if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
+				/* All the bits are present. Drop. */
+				__kfree_skb(skb);
+				tcp_dsack_set(sk, seq, end_seq);
+				goto add_sack;
+			}
+			if (after(seq, TCP_SKB_CB(skb1)->seq)) {
+				/* Partial overlap. */
+				tcp_dsack_set(sk, seq,
+					      TCP_SKB_CB(skb1)->end_seq);
+			} else {
+				if (skb_queue_is_first(&tp->out_of_order_queue,
+						       skb1))
+					skb1 = NULL;
+				else
+					skb1 = skb_queue_prev(
+						&tp->out_of_order_queue,
+						skb1);
+			}
+		}
+		if (!skb1)
+			__skb_queue_head(&tp->out_of_order_queue, skb);
+		else
+			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
+
+		/* And clean segments covered by new one as whole. */
+		while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
+			skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
+
+			if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
+				break;
+			if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
+				tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
+						 end_seq);
+				break;
+			}
+			__skb_unlink(skb1, &tp->out_of_order_queue);
+			tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
+					 TCP_SKB_CB(skb1)->end_seq);
+			__kfree_skb(skb1);
+		}
+
+add_sack:
+		if (tcp_is_sack(tp))
+			tcp_sack_new_ofo_skb(sk, seq, end_seq);
+	}
 }
 
 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,