diff --git a/[refs] b/[refs] index eb118035d95e..a39893eea00f 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: a1a5da57d0884017b8c3a011a28d4f5e08a2ea4f +refs/heads/master: 68f20d948c86bd6bbc075052f6b6c45b8f56957e diff --git a/trunk/Documentation/networking/e100.txt b/trunk/Documentation/networking/e100.txt index 944aa55e79f8..162f323a7a1f 100644 --- a/trunk/Documentation/networking/e100.txt +++ b/trunk/Documentation/networking/e100.txt @@ -72,7 +72,7 @@ Tx Descriptors: Number of transmit descriptors. A transmit descriptor is a data ethtool -G eth? tx n, where n is the number of desired tx descriptors. Speed/Duplex: The driver auto-negotiates the link speed and duplex settings by - default. Ethtool can be used as follows to force speed/duplex. + default. The ethtool utility can be used as follows to force speed/duplex. ethtool -s eth? autoneg off speed {10|100} duplex {full|half} @@ -126,30 +126,21 @@ Additional Configurations ------- The driver utilizes the ethtool interface for driver configuration and - diagnostics, as well as displaying statistical information. Ethtool + diagnostics, as well as displaying statistical information. The ethtool version 1.6 or later is required for this functionality. The latest release of ethtool can be found from - http://sourceforge.net/projects/gkernel. - - NOTE: Ethtool 1.6 only supports a limited set of ethtool options. Support - for a more complete ethtool feature set can be enabled by upgrading - ethtool to ethtool-1.8.1. - + http://ftp.kernel.org/pub/software/network/ethtool/ Enabling Wake on LAN* (WoL) --------------------------- - WoL is provided through the Ethtool* utility. Ethtool is included with Red - Hat* 8.0. For other Linux distributions, download and install Ethtool from - the following website: http://sourceforge.net/projects/gkernel. - - For instructions on enabling WoL with Ethtool, refer to the Ethtool man page. + WoL is provided through the ethtool* utility. For instructions on enabling + WoL with ethtool, refer to the ethtool man page. WoL will be enabled on the system during the next shut down or reboot. For this driver version, in order to enable WoL, the e100 driver must be loaded when shutting down or rebooting the system. - NAPI ---- diff --git a/trunk/Documentation/networking/e1000.txt b/trunk/Documentation/networking/e1000.txt index 6cb13e9e1346..71ca95855671 100644 --- a/trunk/Documentation/networking/e1000.txt +++ b/trunk/Documentation/networking/e1000.txt @@ -431,15 +431,15 @@ Additional Configurations Ethtool ------- The driver utilizes the ethtool interface for driver configuration and - diagnostics, as well as displaying statistical information. Ethtool + diagnostics, as well as displaying statistical information. The ethtool version 1.6 or later is required for this functionality. The latest release of ethtool can be found from - http://sourceforge.net/projects/gkernel. + http://ftp.kernel.org/pub/software/network/ethtool/ Enabling Wake on LAN* (WoL) --------------------------- - WoL is configured through the Ethtool* utility. + WoL is configured through the ethtool* utility. WoL will be enabled on the system during the next shut down or reboot. 
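  As a concrete illustration, assuming a hypothetical interface name eth0,
  magic-packet WoL can be enabled with:

  ethtool -s eth0 wol g

  Refer to the ethtool man page for the remaining wol flags.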
For this driver version, in order to enable WoL, the e1000 driver must be diff --git a/trunk/Documentation/networking/e1000e.txt b/trunk/Documentation/networking/e1000e.txt index 81a66e69a127..97b5ba942ebf 100644 --- a/trunk/Documentation/networking/e1000e.txt +++ b/trunk/Documentation/networking/e1000e.txt @@ -269,26 +269,26 @@ Additional Configurations ------- The driver utilizes the ethtool interface for driver configuration and diagnostics, as well as displaying statistical information. We - strongly recommend downloading the latest version of Ethtool at: + strongly recommend downloading the latest version of ethtool at: - http://sourceforge.net/projects/gkernel. + http://ftp.kernel.org/pub/software/network/ethtool/ Speed and Duplex ---------------- - Speed and Duplex are configured through the Ethtool* utility. For - instructions, refer to the Ethtool man page. + Speed and Duplex are configured through the ethtool* utility. For + instructions, refer to the ethtool man page. Enabling Wake on LAN* (WoL) --------------------------- - WoL is configured through the Ethtool* utility. For instructions on - enabling WoL with Ethtool, refer to the Ethtool man page. + WoL is configured through the ethtool* utility. For instructions on + enabling WoL with ethtool, refer to the ethtool man page. WoL will be enabled on the system during the next shut down or reboot. For this driver version, in order to enable WoL, the e1000e driver must be loaded when shutting down or rebooting the system. In most cases Wake On LAN is only supported on port A for multiple port - adapters. To verify if a port supports Wake on Lan run Ethtool eth. + adapters. To verify if a port supports Wake on Lan run ethtool eth. Support ======= diff --git a/trunk/Documentation/networking/igb.txt b/trunk/Documentation/networking/igb.txt index 4a5e29c19bd1..98953c0d5342 100644 --- a/trunk/Documentation/networking/igb.txt +++ b/trunk/Documentation/networking/igb.txt @@ -62,15 +62,15 @@ Additional Configurations ------- The driver utilizes the ethtool interface for driver configuration and diagnostics, as well as displaying statistical information. The latest - version of Ethtool can be found at: + version of ethtool can be found at: http://ftp.kernel.org/pub/software/network/ethtool/ Enabling Wake on LAN* (WoL) --------------------------- - WoL is configured through the Ethtool* utility. + WoL is configured through the ethtool* utility. - For instructions on enabling WoL with Ethtool, refer to the Ethtool man page. + For instructions on enabling WoL with ethtool, refer to the ethtool man page. WoL will be enabled on the system during the next shut down or reboot. For this driver version, in order to enable WoL, the igb driver must be diff --git a/trunk/Documentation/networking/igbvf.txt b/trunk/Documentation/networking/igbvf.txt index 694817b17a9c..cbfe4ee65533 100644 --- a/trunk/Documentation/networking/igbvf.txt +++ b/trunk/Documentation/networking/igbvf.txt @@ -58,11 +58,11 @@ Additional Configurations Ethtool ------- The driver utilizes the ethtool interface for driver configuration and - diagnostics, as well as displaying statistical information. Ethtool + diagnostics, as well as displaying statistical information. The ethtool version 3.0 or later is required for this functionality, although we strongly recommend downloading the latest version at: - http://sourceforge.net/projects/gkernel. 
+ http://ftp.kernel.org/pub/software/network/ethtool/ Support ======= diff --git a/trunk/Documentation/networking/ixgb.txt b/trunk/Documentation/networking/ixgb.txt index a0d0ffb5e584..e196f16df313 100644 --- a/trunk/Documentation/networking/ixgb.txt +++ b/trunk/Documentation/networking/ixgb.txt @@ -309,15 +309,15 @@ Additional Configurations Ethtool ------- The driver utilizes the ethtool interface for driver configuration and - diagnostics, as well as displaying statistical information. Ethtool + diagnostics, as well as displaying statistical information. The ethtool version 1.6 or later is required for this functionality. The latest release of ethtool can be found from - http://sourceforge.net/projects/gkernel + http://ftp.kernel.org/pub/software/network/ethtool/ - NOTE: Ethtool 1.6 only supports a limited set of ethtool options. Support - for a more complete ethtool feature set can be enabled by upgrading - to the latest version. + NOTE: The ethtool version 1.6 only supports a limited set of ethtool options. + Support for a more complete ethtool feature set can be enabled by + upgrading to the latest version. NAPI diff --git a/trunk/Documentation/networking/ixgbe.txt b/trunk/Documentation/networking/ixgbe.txt index 9ade2806d82c..af77ed3c4172 100644 --- a/trunk/Documentation/networking/ixgbe.txt +++ b/trunk/Documentation/networking/ixgbe.txt @@ -34,7 +34,7 @@ is an Intel(R) Ethernet Server Adapter X520-2, then it only supports Intel optics and/or the direct attach cables listed below. When 82599-based SFP+ devices are connected back to back, they should be set to -the same Speed setting via Ethtool. Results may vary if you mix speed settings. +the same Speed setting via ethtool. Results may vary if you mix speed settings. 82598-based adapters support all passive direct attach cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications. Active direct attach cables are not supported. @@ -110,7 +110,7 @@ threshold. When rx is enabled, the transmit unit will halt for the time delay specified when a PAUSE frame is received. Flow Control is enabled by default. If you want to disable a flow control -capable link partner, use Ethtool: +capable link partner, use ethtool: ethtool -A eth? autoneg off RX off TX off @@ -181,10 +181,10 @@ Additional Configurations ------- The driver utilizes the ethtool interface for driver configuration and diagnostics, as well as displaying statistical information. The latest - Ethtool version is required for this functionality. + ethtool version is required for this functionality. The latest release of ethtool can be found from - http://sourceforge.net/projects/gkernel. 
+ http://ftp.kernel.org/pub/software/network/ethtool/ FCoE ---- diff --git a/trunk/drivers/net/bna/bfa_defs.h b/trunk/drivers/net/bna/bfa_defs.h index 2ea0dfe1cedc..29c1b8de2c2d 100644 --- a/trunk/drivers/net/bna/bfa_defs.h +++ b/trunk/drivers/net/bna/bfa_defs.h @@ -112,18 +112,16 @@ struct bfa_ioc_pci_attr { * IOC states */ enum bfa_ioc_state { - BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */ - BFA_IOC_RESET = 2, /*!< IOC is in reset state */ - BFA_IOC_SEMWAIT = 3, /*!< Waiting for IOC h/w semaphore */ - BFA_IOC_HWINIT = 4, /*!< IOC h/w is being initialized */ - BFA_IOC_GETATTR = 5, /*!< IOC is being configured */ - BFA_IOC_OPERATIONAL = 6, /*!< IOC is operational */ - BFA_IOC_INITFAIL = 7, /*!< IOC hardware failure */ - BFA_IOC_FAIL = 8, /*!< IOC heart-beat failure */ - BFA_IOC_DISABLING = 9, /*!< IOC is being disabled */ - BFA_IOC_DISABLED = 10, /*!< IOC is disabled */ - BFA_IOC_FWMISMATCH = 11, /*!< IOC f/w different from drivers */ - BFA_IOC_ENABLING = 12, /*!< IOC is being enabled */ + BFA_IOC_RESET = 1, /*!< IOC is in reset state */ + BFA_IOC_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */ + BFA_IOC_HWINIT = 3, /*!< IOC h/w is being initialized */ + BFA_IOC_GETATTR = 4, /*!< IOC is being configured */ + BFA_IOC_OPERATIONAL = 5, /*!< IOC is operational */ + BFA_IOC_INITFAIL = 6, /*!< IOC hardware failure */ + BFA_IOC_HBFAIL = 7, /*!< IOC heart-beat failure */ + BFA_IOC_DISABLING = 8, /*!< IOC is being disabled */ + BFA_IOC_DISABLED = 9, /*!< IOC is disabled */ + BFA_IOC_FWMISMATCH = 10, /*!< IOC f/w different from drivers */ }; /** diff --git a/trunk/drivers/net/bna/bfa_defs_mfg_comm.h b/trunk/drivers/net/bna/bfa_defs_mfg_comm.h index fdd677618361..987978fcb3fe 100644 --- a/trunk/drivers/net/bna/bfa_defs_mfg_comm.h +++ b/trunk/drivers/net/bna/bfa_defs_mfg_comm.h @@ -95,6 +95,28 @@ enum { (type) == BFA_MFG_TYPE_CNA10P1 || \ bfa_mfg_is_mezz(type))) +/** + * Check if the card having old wwn/mac handling + */ +#define bfa_mfg_is_old_wwn_mac_model(type) (( \ + (type) == BFA_MFG_TYPE_FC8P2 || \ + (type) == BFA_MFG_TYPE_FC8P1 || \ + (type) == BFA_MFG_TYPE_FC4P2 || \ + (type) == BFA_MFG_TYPE_FC4P1 || \ + (type) == BFA_MFG_TYPE_CNA10P2 || \ + (type) == BFA_MFG_TYPE_CNA10P1 || \ + (type) == BFA_MFG_TYPE_JAYHAWK || \ + (type) == BFA_MFG_TYPE_WANCHESE)) + +#define bfa_mfg_increment_wwn_mac(m, i) \ +do { \ + u32 t = ((m)[0] << 16) | ((m)[1] << 8) | (m)[2]; \ + t += (i); \ + (m)[0] = (t >> 16) & 0xFF; \ + (m)[1] = (t >> 8) & 0xFF; \ + (m)[2] = t & 0xFF; \ +} while (0) + #define bfa_mfg_adapter_prop_init_flash(card_type, prop) \ do { \ switch ((card_type)) { \ diff --git a/trunk/drivers/net/bna/bfa_ioc.c b/trunk/drivers/net/bna/bfa_ioc.c index 34933cb9569f..e94e5aa97515 100644 --- a/trunk/drivers/net/bna/bfa_ioc.c +++ b/trunk/drivers/net/bna/bfa_ioc.c @@ -26,6 +26,25 @@ * IOC local definitions */ +#define bfa_ioc_timer_start(__ioc) \ + mod_timer(&(__ioc)->ioc_timer, jiffies + \ + msecs_to_jiffies(BFA_IOC_TOV)) +#define bfa_ioc_timer_stop(__ioc) del_timer(&(__ioc)->ioc_timer) + +#define bfa_ioc_recovery_timer_start(__ioc) \ + mod_timer(&(__ioc)->ioc_timer, jiffies + \ + msecs_to_jiffies(BFA_IOC_TOV_RECOVER)) + +#define bfa_sem_timer_start(__ioc) \ + mod_timer(&(__ioc)->sem_timer, jiffies + \ + msecs_to_jiffies(BFA_IOC_HWSEM_TOV)) +#define bfa_sem_timer_stop(__ioc) del_timer(&(__ioc)->sem_timer) + +#define bfa_hb_timer_start(__ioc) \ + mod_timer(&(__ioc)->hb_timer, jiffies + \ + msecs_to_jiffies(BFA_IOC_HB_TOV)) +#define bfa_hb_timer_stop(__ioc) del_timer(&(__ioc)->hb_timer) + /** * Asic 
specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */ @@ -36,16 +55,11 @@ ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) -#define bfa_ioc_notify_fail(__ioc) \ - ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) -#define bfa_ioc_sync_join(__ioc) \ - ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) -#define bfa_ioc_sync_leave(__ioc) \ - ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc)) -#define bfa_ioc_sync_ack(__ioc) \ - ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc)) -#define bfa_ioc_sync_complete(__ioc) \ - ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc)) +#define bfa_ioc_notify_hbfail(__ioc) \ + ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) + +#define bfa_ioc_is_optrom(__ioc) \ + (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ) #define bfa_ioc_mbox_cmd_pending(__ioc) \ (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ @@ -71,12 +85,6 @@ static void bfa_ioc_recover(struct bfa_ioc *ioc); static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc); static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); -static void bfa_ioc_fail_notify(struct bfa_ioc *ioc); -static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc); -static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc); -static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc); -static void bfa_ioc_pf_failed(struct bfa_ioc *ioc); -static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc); static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param); static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); @@ -93,173 +101,72 @@ static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer); static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model); static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); +static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc); /** - * IOC state machine definitions/declarations + * IOC state machine events */ enum ioc_event { - IOC_E_RESET = 1, /*!< IOC reset request */ - IOC_E_ENABLE = 2, /*!< IOC enable request */ - IOC_E_DISABLE = 3, /*!< IOC disable request */ - IOC_E_DETACH = 4, /*!< driver detach cleanup */ - IOC_E_ENABLED = 5, /*!< f/w enabled */ - IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */ - IOC_E_DISABLED = 7, /*!< f/w disabled */ - IOC_E_INITFAILED = 8, /*!< failure notice by iocpf sm */ - IOC_E_PFAILED = 9, /*!< failure notice by iocpf sm */ - IOC_E_HBFAIL = 10, /*!< heartbeat failure */ - IOC_E_HWERROR = 11, /*!< hardware error interrupt */ - IOC_E_TIMEOUT = 12, /*!< timeout */ + IOC_E_ENABLE = 1, /*!< IOC enable request */ + IOC_E_DISABLE = 2, /*!< IOC disable request */ + IOC_E_TIMEOUT = 3, /*!< f/w response timeout */ + IOC_E_FWREADY = 4, /*!< f/w initialization done */ + IOC_E_FWRSP_GETATTR = 5, /*!< IOC get attribute response */ + IOC_E_FWRSP_ENABLE = 6, /*!< enable f/w response */ + IOC_E_FWRSP_DISABLE = 7, /*!< disable f/w response */ + IOC_E_HBFAIL = 8, /*!< heartbeat failure */ + IOC_E_HWERROR = 9, /*!< hardware error interrupt */ + IOC_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */ + IOC_E_DETACH = 11, /*!< driver detach cleanup */ }; -bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event); 
+bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event); -bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event); -bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event); static struct bfa_sm_table ioc_sm_table[] = { - {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, - {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, + {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH}, + {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH}, + {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT}, + {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT}, + {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT}, {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, - {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL}, - {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, + {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL}, + {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL}, {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, }; -/** - * IOCPF state machine definitions/declarations - */ - -/* - * Forward declareations for iocpf state machine - */ -static void bfa_iocpf_enable(struct bfa_ioc *ioc); -static void bfa_iocpf_disable(struct bfa_ioc *ioc); -static void bfa_iocpf_fail(struct bfa_ioc *ioc); -static void bfa_iocpf_initfail(struct bfa_ioc *ioc); -static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc); -static void bfa_iocpf_stop(struct bfa_ioc *ioc); - -/** - * IOCPF state machine events - */ -enum iocpf_event { - IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */ - IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */ - IOCPF_E_STOP = 3, /*!< stop on driver detach */ - IOCPF_E_FWREADY = 4, /*!< f/w initialization done */ - IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */ - IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */ - IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */ - IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */ - IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */ - IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */ - IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */ -}; - -/** - * IOCPF states - */ -enum bfa_iocpf_state { - BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */ - BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */ - BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */ - BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */ - BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */ - BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */ - BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */ - BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */ - BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */ -}; - -bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event); -bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event); -bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event); -bfa_fsm_state_decl(bfa_iocpf, semwait, struct 
bfa_iocpf, enum iocpf_event); -bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event); -bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event); -bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event); -bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf, - enum iocpf_event); -bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event); -bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event); -bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event); -bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event); -bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf, - enum iocpf_event); -bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event); - -static struct bfa_sm_table iocpf_sm_table[] = { - {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET}, - {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH}, - {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH}, - {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT}, - {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT}, - {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT}, - {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY}, - {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL}, - {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL}, - {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL}, - {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL}, - {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING}, - {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING}, - {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, -}; - -/** - * IOC State Machine - */ - -/** - * Beginning state. IOC uninit state. - */ -static void -bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc) -{ -} - -/** - * IOC is in uninit state. - */ -static void -bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event) -{ - switch (event) { - case IOC_E_RESET: - bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); - break; - - default: - bfa_sm_fault(ioc, event); - } -} - /** * Reset entry actions -- initialize state machine */ static void bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) { - bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); + ioc->retry_count = 0; + ioc->auto_recover = bfa_nw_auto_recover; } /** - * IOC is in reset state. + * Beginning state. IOC is in reset state. */ static void bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) { switch (event) { case IOC_E_ENABLE: - bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); break; case IOC_E_DISABLE: @@ -267,200 +174,6 @@ bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) break; case IOC_E_DETACH: - bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); - break; - - default: - bfa_sm_fault(ioc, event); - } -} - -static void -bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc) -{ - bfa_iocpf_enable(ioc); -} - -/** - * Host IOC function is being enabled, awaiting response from firmware. - * Semaphore is acquired. - */ -static void -bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) -{ - switch (event) { - case IOC_E_ENABLED: - bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); - break; - - case IOC_E_PFAILED: - /* !!! fall through !!! 
*/ - case IOC_E_HWERROR: - ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); - bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); - if (event != IOC_E_PFAILED) - bfa_iocpf_initfail(ioc); - break; - - case IOC_E_DISABLE: - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); - break; - - case IOC_E_DETACH: - bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); - bfa_iocpf_stop(ioc); - break; - - case IOC_E_ENABLE: - break; - - default: - bfa_sm_fault(ioc, event); - } -} - -/** - * Semaphore should be acquired for version check. - */ -static void -bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) -{ - mod_timer(&ioc->ioc_timer, jiffies + - msecs_to_jiffies(BFA_IOC_TOV)); - bfa_ioc_send_getattr(ioc); -} - -/** - * IOC configuration in progress. Timer is active. - */ -static void -bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) -{ - switch (event) { - case IOC_E_FWRSP_GETATTR: - del_timer(&ioc->ioc_timer); - bfa_ioc_check_attr_wwns(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_op); - break; - - case IOC_E_PFAILED: - case IOC_E_HWERROR: - del_timer(&ioc->ioc_timer); - /* fall through */ - case IOC_E_TIMEOUT: - ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); - bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); - if (event != IOC_E_PFAILED) - bfa_iocpf_getattrfail(ioc); - break; - - case IOC_E_DISABLE: - del_timer(&ioc->ioc_timer); - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); - break; - - case IOC_E_ENABLE: - break; - - default: - bfa_sm_fault(ioc, event); - } -} - -static void -bfa_ioc_sm_op_entry(struct bfa_ioc *ioc) -{ - ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); - bfa_ioc_hb_monitor(ioc); -} - -static void -bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event) -{ - switch (event) { - case IOC_E_ENABLE: - break; - - case IOC_E_DISABLE: - bfa_ioc_hb_stop(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); - break; - - case IOC_E_PFAILED: - case IOC_E_HWERROR: - bfa_ioc_hb_stop(ioc); - /* !!! fall through !!! */ - case IOC_E_HBFAIL: - bfa_ioc_fail_notify(ioc); - if (ioc->iocpf.auto_recover) - bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); - else - bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); - - if (event != IOC_E_PFAILED) - bfa_iocpf_fail(ioc); - break; - - default: - bfa_sm_fault(ioc, event); - } -} - -static void -bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc) -{ - bfa_iocpf_disable(ioc); -} - -/** - * IOC is being desabled - */ -static void -bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) -{ - switch (event) { - case IOC_E_DISABLED: - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); - break; - - case IOC_E_HWERROR: - /* - * No state change. Will move to disabled state - * after iocpf sm completes failure processing and - * moves to disabled state. - */ - bfa_iocpf_fail(ioc); - break; - - default: - bfa_sm_fault(ioc, event); - } -} - -/** - * IOC desable completion entry. - */ -static void -bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) -{ - bfa_ioc_disable_comp(ioc); -} - -static void -bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event) -{ - switch (event) { - case IOC_E_ENABLE: - bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); - break; - - case IOC_E_DISABLE: - ioc->cbfn->disable_cbfn(ioc->bfa); - break; - - case IOC_E_DETACH: - bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); - bfa_iocpf_stop(ioc); break; default: @@ -468,165 +181,42 @@ bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event) } } -static void -bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc) -{ -} - -/** - * Hardware initialization retry. 
- */ -static void -bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event) -{ - switch (event) { - case IOC_E_ENABLED: - bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); - break; - - case IOC_E_PFAILED: - case IOC_E_HWERROR: - /** - * Initialization retry failed. - */ - ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); - if (event != IOC_E_PFAILED) - bfa_iocpf_initfail(ioc); - break; - - case IOC_E_INITFAILED: - bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); - break; - - case IOC_E_ENABLE: - break; - - case IOC_E_DISABLE: - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); - break; - - case IOC_E_DETACH: - bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); - bfa_iocpf_stop(ioc); - break; - - default: - bfa_sm_fault(ioc, event); - } -} - -static void -bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc) -{ -} - -/** - * IOC failure. - */ -static void -bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event) -{ - switch (event) { - case IOC_E_ENABLE: - ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); - break; - - case IOC_E_DISABLE: - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); - break; - - case IOC_E_DETACH: - bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); - bfa_iocpf_stop(ioc); - break; - - case IOC_E_HWERROR: - /* HB failure notification, ignore. */ - break; - - default: - bfa_sm_fault(ioc, event); - } -} - -/** - * IOCPF State Machine - */ - -/** - * Reset entry actions -- initialize state machine - */ -static void -bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf) -{ - iocpf->retry_count = 0; - iocpf->auto_recover = bfa_nw_auto_recover; -} - -/** - * Beginning state. IOC is in reset state. - */ -static void -bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event) -{ - switch (event) { - case IOCPF_E_ENABLE: - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); - break; - - case IOCPF_E_STOP: - break; - - default: - bfa_sm_fault(iocpf->ioc, event); - } -} - /** * Semaphore should be acquired for version check. */ static void -bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf) +bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc) { - bfa_ioc_hw_sem_get(iocpf->ioc); + bfa_ioc_hw_sem_get(ioc); } /** * Awaiting h/w semaphore to continue with version check. 
*/ static void -bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) +bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event) { - struct bfa_ioc *ioc = iocpf->ioc; - switch (event) { - case IOCPF_E_SEMLOCKED: + case IOC_E_SEMLOCKED: if (bfa_ioc_firmware_lock(ioc)) { - if (bfa_ioc_sync_complete(ioc)) { - iocpf->retry_count = 0; - bfa_ioc_sync_join(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); - } else { - bfa_ioc_firmware_unlock(ioc); - bfa_nw_ioc_hw_sem_release(ioc); - mod_timer(&ioc->sem_timer, jiffies + - msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); - } + ioc->retry_count = 0; + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); } else { bfa_nw_ioc_hw_sem_release(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch); + bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch); } break; - case IOCPF_E_DISABLE: + case IOC_E_DISABLE: + bfa_ioc_disable_comp(ioc); + /* fall through */ + + case IOC_E_DETACH: bfa_ioc_hw_sem_get_cancel(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); - bfa_ioc_pf_disabled(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); break; - case IOCPF_E_STOP: - bfa_ioc_hw_sem_get_cancel(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + case IOC_E_FWREADY: break; default: @@ -635,42 +225,41 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) } /** - * Notify enable completion callback + * Notify enable completion callback and generate mismatch AEN. */ static void -bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf) +bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc) { - /* Call only the first time sm enters fwmismatch state. */ - if (iocpf->retry_count == 0) - bfa_ioc_pf_fwmismatch(iocpf->ioc); - - iocpf->retry_count++; - mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + - msecs_to_jiffies(BFA_IOC_TOV)); + /** + * Provide enable completion callback and AEN notification only once. + */ + if (ioc->retry_count == 0) + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + ioc->retry_count++; + bfa_ioc_timer_start(ioc); } /** * Awaiting firmware version match. */ static void -bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event) +bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event) { - struct bfa_ioc *ioc = iocpf->ioc; - switch (event) { - case IOCPF_E_TIMEOUT: - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); + case IOC_E_TIMEOUT: + bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); break; - case IOCPF_E_DISABLE: - del_timer(&ioc->iocpf_timer); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); - bfa_ioc_pf_disabled(ioc); + case IOC_E_DISABLE: + bfa_ioc_disable_comp(ioc); + /* fall through */ + + case IOC_E_DETACH: + bfa_ioc_timer_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); break; - case IOCPF_E_STOP: - del_timer(&ioc->iocpf_timer); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + case IOC_E_FWREADY: break; default: @@ -682,34 +271,26 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event) * Request for semaphore. */ static void -bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf) +bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc) { - bfa_ioc_hw_sem_get(iocpf->ioc); + bfa_ioc_hw_sem_get(ioc); } /** * Awaiting semaphore for h/w initialzation. 
*/ static void -bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event) +bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event) { - struct bfa_ioc *ioc = iocpf->ioc; - switch (event) { - case IOCPF_E_SEMLOCKED: - if (bfa_ioc_sync_complete(ioc)) { - bfa_ioc_sync_join(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); - } else { - bfa_nw_ioc_hw_sem_release(ioc); - mod_timer(&ioc->sem_timer, jiffies + - msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); - } + case IOC_E_SEMLOCKED: + ioc->retry_count = 0; + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); break; - case IOCPF_E_DISABLE: + case IOC_E_DISABLE: bfa_ioc_hw_sem_get_cancel(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; default: @@ -718,46 +299,46 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event) } static void -bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf) +bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc) { - mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + - msecs_to_jiffies(BFA_IOC_TOV)); - bfa_ioc_reset(iocpf->ioc, 0); + bfa_ioc_timer_start(ioc); + bfa_ioc_reset(ioc, false); } /** + * @brief * Hardware is being initialized. Interrupts are enabled. * Holding hardware semaphore lock. */ static void -bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event) +bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event) { - struct bfa_ioc *ioc = iocpf->ioc; - switch (event) { - case IOCPF_E_FWREADY: - del_timer(&ioc->iocpf_timer); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling); + case IOC_E_FWREADY: + bfa_ioc_timer_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); break; - case IOCPF_E_INITFAIL: - del_timer(&ioc->iocpf_timer); - /* - * !!! fall through !!! - */ + case IOC_E_HWERROR: + bfa_ioc_timer_stop(ioc); + /* fall through */ + + case IOC_E_TIMEOUT: + ioc->retry_count++; + if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { + bfa_ioc_timer_start(ioc); + bfa_ioc_reset(ioc, true); + break; + } - case IOCPF_E_TIMEOUT: bfa_nw_ioc_hw_sem_release(ioc); - if (event == IOCPF_E_TIMEOUT) - bfa_ioc_pf_failed(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); + bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); break; - case IOCPF_E_DISABLE: - del_timer(&ioc->iocpf_timer); - bfa_ioc_sync_leave(ioc); + case IOC_E_DISABLE: bfa_nw_ioc_hw_sem_release(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + bfa_ioc_timer_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; default: @@ -766,11 +347,10 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event) } static void -bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf) +bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc) { - mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + - msecs_to_jiffies(BFA_IOC_TOV)); - bfa_ioc_send_enable(iocpf->ioc); + bfa_ioc_timer_start(ioc); + bfa_ioc_send_enable(ioc); } /** @@ -778,36 +358,39 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf) * Semaphore is acquired. */ static void -bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event) +bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) { - struct bfa_ioc *ioc = iocpf->ioc; - switch (event) { - case IOCPF_E_FWRSP_ENABLE: - del_timer(&ioc->iocpf_timer); + case IOC_E_FWRSP_ENABLE: + bfa_ioc_timer_stop(ioc); bfa_nw_ioc_hw_sem_release(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); + bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); break; - case IOCPF_E_INITFAIL: - del_timer(&ioc->iocpf_timer); - /* - * !!! fall through !!! 
- */ - case IOCPF_E_TIMEOUT: + case IOC_E_HWERROR: + bfa_ioc_timer_stop(ioc); + /* fall through */ + + case IOC_E_TIMEOUT: + ioc->retry_count++; + if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { + writel(BFI_IOC_UNINIT, + ioc->ioc_regs.ioc_fwstate); + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); + break; + } + bfa_nw_ioc_hw_sem_release(ioc); - if (event == IOCPF_E_TIMEOUT) - bfa_ioc_pf_failed(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); + bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); break; - case IOCPF_E_DISABLE: - del_timer(&ioc->iocpf_timer); + case IOC_E_DISABLE: + bfa_ioc_timer_stop(ioc); bfa_nw_ioc_hw_sem_release(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; - case IOCPF_E_FWREADY: + case IOC_E_FWREADY: bfa_ioc_send_enable(ioc); break; @@ -816,84 +399,38 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event) } } -static bool -bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) -{ - return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); -} - -static void -bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf) -{ - bfa_ioc_pf_enabled(iocpf->ioc); -} - -static void -bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event) -{ - struct bfa_ioc *ioc = iocpf->ioc; - - switch (event) { - case IOCPF_E_DISABLE: - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); - break; - - case IOCPF_E_GETATTRFAIL: - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); - break; - - case IOCPF_E_FAIL: - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); - break; - - case IOCPF_E_FWREADY: - bfa_ioc_pf_failed(ioc); - if (bfa_nw_ioc_is_operational(ioc)) - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); - else - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); - break; - - default: - bfa_sm_fault(ioc, event); - } -} - static void -bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf) +bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) { - mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + - msecs_to_jiffies(BFA_IOC_TOV)); - bfa_ioc_send_disable(iocpf->ioc); + bfa_ioc_timer_start(ioc); + bfa_ioc_send_getattr(ioc); } /** - * IOC is being disabled + * @brief + * IOC configuration in progress. Timer is active. */ static void -bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) +bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) { - struct bfa_ioc *ioc = iocpf->ioc; - switch (event) { - case IOCPF_E_FWRSP_DISABLE: - case IOCPF_E_FWREADY: - del_timer(&ioc->iocpf_timer); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + case IOC_E_FWRSP_GETATTR: + bfa_ioc_timer_stop(ioc); + bfa_ioc_check_attr_wwns(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_op); break; - case IOCPF_E_FAIL: - del_timer(&ioc->iocpf_timer); - /* - * !!! fall through !!! 
- */ + case IOC_E_HWERROR: + bfa_ioc_timer_stop(ioc); + /* fall through */ - case IOCPF_E_TIMEOUT: - writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + case IOC_E_TIMEOUT: + bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); break; - case IOCPF_E_FWRSP_ENABLE: + case IOC_E_DISABLE: + bfa_ioc_timer_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; default: @@ -902,27 +439,35 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) } static void -bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf) +bfa_ioc_sm_op_entry(struct bfa_ioc *ioc) { - bfa_ioc_hw_sem_get(iocpf->ioc); + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); + bfa_ioc_hb_monitor(ioc); } -/** - * IOC hb ack request is being removed. - */ static void -bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) +bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event) { - struct bfa_ioc *ioc = iocpf->ioc; - switch (event) { - case IOCPF_E_SEMLOCKED: - bfa_ioc_sync_leave(ioc); - bfa_nw_ioc_hw_sem_release(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + case IOC_E_ENABLE: + break; + + case IOC_E_DISABLE: + bfa_ioc_hb_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); break; - case IOCPF_E_FAIL: + case IOC_E_HWERROR: + case IOC_E_FWREADY: + /** + * Hard error or IOC recovery by other function. + * Treat it same as heartbeat failure. + */ + bfa_ioc_hb_stop(ioc); + /* !!! fall through !!! */ + + case IOC_E_HBFAIL: + bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail); break; default: @@ -930,29 +475,34 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) } } -/** - * IOC disable completion entry. - */ static void -bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf) +bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc) { - bfa_ioc_pf_disabled(iocpf->ioc); + bfa_ioc_timer_start(ioc); + bfa_ioc_send_disable(ioc); } +/** + * IOC is being disabled + */ static void -bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event) +bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) { - struct bfa_ioc *ioc = iocpf->ioc; - switch (event) { - case IOCPF_E_ENABLE: - iocpf->retry_count = 0; - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); + case IOC_E_FWRSP_DISABLE: + bfa_ioc_timer_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; - case IOCPF_E_STOP: - bfa_ioc_firmware_unlock(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + case IOC_E_HWERROR: + bfa_ioc_timer_stop(ioc); + /* + * !!! fall through !!! + */ + + case IOC_E_TIMEOUT: + writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; default: @@ -960,51 +510,33 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event) } } +/** + * IOC disable completion entry. + */ static void -bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf) +bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) { - bfa_ioc_hw_sem_get(iocpf->ioc); + bfa_ioc_disable_comp(ioc); } -/** - * Hardware initialization failed. 
- */ static void -bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) +bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event) { - struct bfa_ioc *ioc = iocpf->ioc; - switch (event) { - case IOCPF_E_SEMLOCKED: - bfa_ioc_notify_fail(ioc); - bfa_ioc_sync_ack(ioc); - iocpf->retry_count++; - if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) { - bfa_ioc_sync_leave(ioc); - bfa_nw_ioc_hw_sem_release(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); - } else { - if (bfa_ioc_sync_complete(ioc)) - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); - else { - bfa_nw_ioc_hw_sem_release(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); - } - } + case IOC_E_ENABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); break; - case IOCPF_E_DISABLE: - bfa_ioc_hw_sem_get_cancel(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + case IOC_E_DISABLE: + ioc->cbfn->disable_cbfn(ioc->bfa); break; - case IOCPF_E_STOP: - bfa_ioc_hw_sem_get_cancel(ioc); - bfa_ioc_firmware_unlock(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + case IOC_E_FWREADY: break; - case IOCPF_E_FAIL: + case IOC_E_DETACH: + bfa_ioc_firmware_unlock(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); break; default: @@ -1013,27 +545,33 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) } static void -bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf) +bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc) { - bfa_ioc_pf_initfailed(iocpf->ioc); + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_ioc_timer_start(ioc); } /** + * @brief * Hardware initialization failed. */ static void -bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event) +bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event) { - struct bfa_ioc *ioc = iocpf->ioc; - switch (event) { - case IOCPF_E_DISABLE: - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + case IOC_E_DISABLE: + bfa_ioc_timer_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; - case IOCPF_E_STOP: + case IOC_E_DETACH: + bfa_ioc_timer_stop(ioc); bfa_ioc_firmware_unlock(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); + break; + + case IOC_E_TIMEOUT: + bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); break; default: @@ -1042,79 +580,80 @@ bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event) } static void -bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf) +bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc) { + struct list_head *qe; + struct bfa_ioc_hbfail_notify *notify; + /** * Mark IOC as failed in hardware and stop firmware. */ - bfa_ioc_lpu_stop(iocpf->ioc); + bfa_ioc_lpu_stop(ioc); + writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + + /** + * Notify other functions on HB failure. + */ + bfa_ioc_notify_hbfail(ioc); + + /** + * Notify driver and common modules registered for notification. + */ + ioc->cbfn->hbfail_cbfn(ioc->bfa); + list_for_each(qe, &ioc->hb_notify_q) { + notify = (struct bfa_ioc_hbfail_notify *) qe; + notify->cbfn(notify->cbarg); + } /** * Flush any queued up mailbox requests. */ - bfa_ioc_mbox_hbfail(iocpf->ioc); - bfa_ioc_hw_sem_get(iocpf->ioc); + bfa_ioc_mbox_hbfail(ioc); + + /** + * Trigger auto-recovery after a delay. + */ + if (ioc->auto_recover) + mod_timer(&ioc->ioc_timer, jiffies + + msecs_to_jiffies(BFA_IOC_TOV_RECOVER)); } /** - * IOC is in failed state. + * @brief + * IOC heartbeat failure. 
*/ static void -bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) +bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event) { - struct bfa_ioc *ioc = iocpf->ioc; - switch (event) { - case IOCPF_E_SEMLOCKED: - iocpf->retry_count = 0; - bfa_ioc_sync_ack(ioc); - bfa_ioc_notify_fail(ioc); - if (!iocpf->auto_recover) { - bfa_ioc_sync_leave(ioc); - bfa_nw_ioc_hw_sem_release(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); - } else { - if (bfa_ioc_sync_complete(ioc)) - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); - else { - bfa_nw_ioc_hw_sem_release(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); - } - } - break; - case IOCPF_E_DISABLE: - bfa_ioc_hw_sem_get_cancel(ioc); - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + case IOC_E_ENABLE: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); break; - case IOCPF_E_FAIL: + case IOC_E_DISABLE: + if (ioc->auto_recover) + bfa_ioc_timer_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; - default: - bfa_sm_fault(ioc, event); - } -} - -static void -bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf) -{ -} + case IOC_E_TIMEOUT: + bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); + break; -/** - * @brief - * IOC is in failed state. - */ -static void -bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event) -{ - switch (event) { - case IOCPF_E_DISABLE: - bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + case IOC_E_FWREADY: + /** + * Recovery is already initiated by other function. + */ break; + case IOC_E_HWERROR: + /* + * HB failure notification, ignore. + */ + break; default: - bfa_sm_fault(iocpf->ioc, event); + bfa_sm_fault(ioc, event); } } @@ -1139,6 +678,14 @@ bfa_ioc_disable_comp(struct bfa_ioc *ioc) } } +void +bfa_nw_ioc_sem_timeout(void *ioc_arg) +{ + struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; + + bfa_ioc_hw_sem_get(ioc); +} + bool bfa_nw_ioc_sem_get(void __iomem *sem_reg) { @@ -1178,7 +725,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc *ioc) */ r32 = readl(ioc->ioc_regs.ioc_sem_reg); if (r32 == 0) { - bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); + bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED); return; } @@ -1318,6 +865,12 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc) { struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr; + /** + * If bios/efi boot (flash based) -- return true + */ + if (bfa_ioc_is_optrom(ioc)) + return true; + bfa_nw_ioc_fwver_get(ioc, &fwhdr); drv_fwhdr = (struct bfi_ioc_image_hdr *) bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); @@ -1381,15 +934,20 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) /** * If IOC function is disabled and firmware version is same, * just re-enable IOC. + * + * If option rom, IOC must not be in operational state. With + * convergence, IOC will be in operational state when 2nd driver + * is loaded. */ - if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) { + if (ioc_fwstate == BFI_IOC_DISABLED || + (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) { /** * When using MSI-X any pending firmware ready event should * be flushed. Otherwise MSI-X interrupts are not delivered. 
*/ bfa_ioc_msgflush(ioc); ioc->cbfn->reset_cbfn(ioc->bfa); - bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); + bfa_fsm_send_event(ioc, IOC_E_FWREADY); return; } @@ -1475,6 +1033,7 @@ bfa_nw_ioc_hb_check(void *cbarg) hb_count = readl(ioc->ioc_regs.heartbeat); if (ioc->hb_count == hb_count) { + pr_crit("Firmware heartbeat failure at %d", hb_count); bfa_ioc_recover(ioc); return; } else { @@ -1519,6 +1078,11 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, */ bfa_ioc_lmem_init(ioc); + /** + * Flash based firmware boot + */ + if (bfa_ioc_is_optrom(ioc)) + boot_type = BFI_BOOT_TYPE_FLASH; fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno); pgnum = bfa_ioc_smem_pgnum(ioc, loff); @@ -1645,55 +1209,6 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc) bfa_q_deq(&mod->cmd_q, &cmd); } -static void -bfa_ioc_fail_notify(struct bfa_ioc *ioc) -{ - struct list_head *qe; - struct bfa_ioc_hbfail_notify *notify; - - /** - * Notify driver and common modules registered for notification. - */ - ioc->cbfn->hbfail_cbfn(ioc->bfa); - list_for_each(qe, &ioc->hb_notify_q) { - notify = (struct bfa_ioc_hbfail_notify *) qe; - notify->cbfn(notify->cbarg); - } -} - -static void -bfa_ioc_pf_enabled(struct bfa_ioc *ioc) -{ - bfa_fsm_send_event(ioc, IOC_E_ENABLED); -} - -static void -bfa_ioc_pf_disabled(struct bfa_ioc *ioc) -{ - bfa_fsm_send_event(ioc, IOC_E_DISABLED); -} - -static void -bfa_ioc_pf_initfailed(struct bfa_ioc *ioc) -{ - bfa_fsm_send_event(ioc, IOC_E_INITFAILED); -} - -static void -bfa_ioc_pf_failed(struct bfa_ioc *ioc) -{ - bfa_fsm_send_event(ioc, IOC_E_PFAILED); -} - -static void -bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc) -{ - /** - * Provide enable completion callback and AEN notification. - */ - ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); -} - /** * IOC public */ @@ -1789,7 +1304,6 @@ static void bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) { union bfi_ioc_i2h_msg_u *msg; - struct bfa_iocpf *iocpf = &ioc->iocpf; msg = (union bfi_ioc_i2h_msg_u *) m; @@ -1800,15 +1314,15 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) break; case BFI_IOC_I2H_READY_EVENT: - bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY); + bfa_fsm_send_event(ioc, IOC_E_FWREADY); break; case BFI_IOC_I2H_ENABLE_REPLY: - bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); + bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE); break; case BFI_IOC_I2H_DISABLE_REPLY: - bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE); + bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE); break; case BFI_IOC_I2H_GETATTR_REPLY: @@ -1834,13 +1348,11 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) ioc->fcmode = false; ioc->pllinit = false; ioc->dbg_fwsave_once = true; - ioc->iocpf.ioc = ioc; bfa_ioc_mbox_attach(ioc); INIT_LIST_HEAD(&ioc->hb_notify_q); - bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); - bfa_fsm_send_event(ioc, IOC_E_RESET); + bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); } /** @@ -2145,40 +1657,7 @@ bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model) static enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc *ioc) { - enum bfa_iocpf_state iocpf_st; - enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm); - - if (ioc_st == BFA_IOC_ENABLING || - ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) { - - iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); - - switch (iocpf_st) { - case BFA_IOCPF_SEMWAIT: - ioc_st = BFA_IOC_SEMWAIT; - break; - - case BFA_IOCPF_HWINIT: - ioc_st = BFA_IOC_HWINIT; - break; - - case BFA_IOCPF_FWMISMATCH: - ioc_st = BFA_IOC_FWMISMATCH; - break; - 
- case BFA_IOCPF_FAIL: - ioc_st = BFA_IOC_FAIL; - break; - - case BFA_IOCPF_INITFAIL: - ioc_st = BFA_IOC_INITFAIL; - break; - - default: - break; - } - } - return ioc_st; + return bfa_sm_to_state(ioc_sm_table, ioc->fsm); } void @@ -2210,84 +1689,44 @@ bfa_ioc_get_pwwn(struct bfa_ioc *ioc) mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc) { - return ioc->attr->mac; + /* + * Currently mfg mac is used as FCoE enode mac (not configured by PBC) + */ + if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE) + return bfa_ioc_get_mfg_mac(ioc); + else + return ioc->attr->mac; } -/** - * Firmware failure detected. Start recovery actions. - */ -static void -bfa_ioc_recover(struct bfa_ioc *ioc) +static mac_t +bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc) { - u16 bdf; + mac_t m; - bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 | - ioc->pcidev.device_id); - - pr_crit("Firmware heartbeat failure at %d", bdf); - BUG_ON(1); -} + m = ioc->attr->mfg_mac; + if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type)) + m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc); + else + bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]), + bfa_ioc_pcifn(ioc)); -static void -bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc) -{ - if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) - return; + return m; } /** - * @dg hal_iocpf_pvt BFA IOC PF private functions - * @{ + * Firmware failure detected. Start recovery actions. */ - -static void -bfa_iocpf_enable(struct bfa_ioc *ioc) -{ - bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); -} - -static void -bfa_iocpf_disable(struct bfa_ioc *ioc) -{ - bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); -} - -static void -bfa_iocpf_fail(struct bfa_ioc *ioc) -{ - bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); -} - -static void -bfa_iocpf_initfail(struct bfa_ioc *ioc) -{ - bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); -} - static void -bfa_iocpf_getattrfail(struct bfa_ioc *ioc) +bfa_ioc_recover(struct bfa_ioc *ioc) { - bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); + bfa_ioc_stats(ioc, ioc_hbfails); + bfa_fsm_send_event(ioc, IOC_E_HBFAIL); } static void -bfa_iocpf_stop(struct bfa_ioc *ioc) -{ - bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); -} - -void -bfa_nw_iocpf_timeout(void *ioc_arg) -{ - struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; - - bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); -} - -void -bfa_nw_iocpf_sem_timeout(void *ioc_arg) +bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc) { - struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; + if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) + return; - bfa_ioc_hw_sem_get(ioc); } diff --git a/trunk/drivers/net/bna/bfa_ioc.h b/trunk/drivers/net/bna/bfa_ioc.h index e4974bc24ef6..a73d84ec808c 100644 --- a/trunk/drivers/net/bna/bfa_ioc.h +++ b/trunk/drivers/net/bna/bfa_ioc.h @@ -26,7 +26,16 @@ #define BFA_IOC_TOV 3000 /* msecs */ #define BFA_IOC_HWSEM_TOV 500 /* msecs */ #define BFA_IOC_HB_TOV 500 /* msecs */ -#define BFA_IOC_HWINIT_MAX 5 +#define BFA_IOC_HWINIT_MAX 2 +#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV + +/** + * Generic Scatter Gather Element used by driver + */ +struct bfa_sge { + u32 sg_len; + void *sg_addr; +}; /** * PCI device information required by IOC @@ -55,6 +64,19 @@ struct bfa_dma { #define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */ #define BFI_SMEM_CT_SIZE 0x280000U /* ! 
2.5MB for catapult */ +/** + * @brief BFA dma address assignment macro + */ +#define bfa_dma_addr_set(dma_addr, pa) \ + __bfa_dma_addr_set(&dma_addr, (u64)pa) + +static inline void +__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa) +{ + dma_addr->a32.addr_lo = (u32) pa; + dma_addr->a32.addr_hi = (u32) (upper_32_bits(pa)); +} + /** * @brief BFA dma address assignment macro. (big endian format) */ @@ -83,11 +105,8 @@ struct bfa_ioc_regs { void __iomem *host_page_num_fn; void __iomem *heartbeat; void __iomem *ioc_fwstate; - void __iomem *alt_ioc_fwstate; void __iomem *ll_halt; - void __iomem *alt_ll_halt; void __iomem *err_set; - void __iomem *ioc_fail_sync; void __iomem *shirq_isr_next; void __iomem *shirq_msk_next; void __iomem *smem_page_start; @@ -146,22 +165,16 @@ struct bfa_ioc_hbfail_notify { (__notify)->cbarg = (__cbarg); \ } while (0) -struct bfa_iocpf { - bfa_fsm_t fsm; - struct bfa_ioc *ioc; - u32 retry_count; - bool auto_recover; -}; - struct bfa_ioc { bfa_fsm_t fsm; struct bfa *bfa; struct bfa_pcidev pcidev; + struct bfa_timer_mod *timer_mod; struct timer_list ioc_timer; - struct timer_list iocpf_timer; struct timer_list sem_timer; struct timer_list hb_timer; u32 hb_count; + u32 retry_count; struct list_head hb_notify_q; void *dbg_fwsave; int dbg_fwsave_len; @@ -169,6 +182,7 @@ struct bfa_ioc { enum bfi_mclass ioc_mc; struct bfa_ioc_regs ioc_regs; struct bfa_ioc_drv_stats stats; + bool auto_recover; bool fcmode; bool ctdev; bool cna; @@ -181,7 +195,6 @@ struct bfa_ioc { struct bfa_ioc_cbfn *cbfn; struct bfa_ioc_mbox_mod mbox_mod; struct bfa_ioc_hwif *ioc_hwif; - struct bfa_iocpf iocpf; }; struct bfa_ioc_hwif { @@ -192,12 +205,8 @@ struct bfa_ioc_hwif { void (*ioc_map_port) (struct bfa_ioc *ioc); void (*ioc_isr_mode_set) (struct bfa_ioc *ioc, bool msix); - void (*ioc_notify_fail) (struct bfa_ioc *ioc); + void (*ioc_notify_hbfail) (struct bfa_ioc *ioc); void (*ioc_ownership_reset) (struct bfa_ioc *ioc); - void (*ioc_sync_join) (struct bfa_ioc *ioc); - void (*ioc_sync_leave) (struct bfa_ioc *ioc); - void (*ioc_sync_ack) (struct bfa_ioc *ioc); - bool (*ioc_sync_complete) (struct bfa_ioc *ioc); }; #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) @@ -262,6 +271,7 @@ void bfa_nw_ioc_enable(struct bfa_ioc *ioc); void bfa_nw_ioc_disable(struct bfa_ioc *ioc); void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc); + void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc, struct bfa_ioc_hbfail_notify *notify); @@ -279,8 +289,7 @@ mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc); */ void bfa_nw_ioc_timeout(void *ioc); void bfa_nw_ioc_hb_check(void *ioc); -void bfa_nw_iocpf_timeout(void *ioc); -void bfa_nw_iocpf_sem_timeout(void *ioc); +void bfa_nw_ioc_sem_timeout(void *ioc); /* * F/W Image Size & Chunk diff --git a/trunk/drivers/net/bna/bfa_ioc_ct.c b/trunk/drivers/net/bna/bfa_ioc_ct.c index 469997c4ffd1..121cfd6d48b1 100644 --- a/trunk/drivers/net/bna/bfa_ioc_ct.c +++ b/trunk/drivers/net/bna/bfa_ioc_ct.c @@ -22,15 +22,6 @@ #include "bfi_ctreg.h" #include "bfa_defs.h" -#define bfa_ioc_ct_sync_pos(__ioc) \ - ((u32) (1 << bfa_ioc_pcifn(__ioc))) -#define BFA_IOC_SYNC_REQD_SH 16 -#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff) -#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000) -#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH) -#define bfa_ioc_ct_sync_reqd_pos(__ioc) \ - (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH) - /* * forward declarations */ @@ -39,12 +30,8 @@ 
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc); static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc); static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc); static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); -static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc); +static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc); static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); -static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc); -static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); -static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); -static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc); static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode); static struct bfa_ioc_hwif nw_hwif_ct; @@ -61,12 +48,8 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail; + nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail; nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join; - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave; - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack; - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete; ioc->ioc_hwif = &nw_hwif_ct; } @@ -103,7 +86,6 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc) if (usecnt == 0) { writel(1, ioc->ioc_regs.ioc_usage_reg); bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); - writel(0, ioc->ioc_regs.ioc_fail_sync); return true; } @@ -167,14 +149,12 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc) * Notify other functions on HB failure. */ static void -bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) +bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc) { if (ioc->cna) { writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); - writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); /* Wait for halt to take effect */ readl(ioc->ioc_regs.ll_halt); - readl(ioc->ioc_regs.alt_ll_halt); } else { writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); readl(ioc->ioc_regs.err_set); @@ -226,19 +206,15 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) if (ioc->port_id == 0) { ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; - ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn; ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu; ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; - ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; } else { ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); - ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn; ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu; ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; - ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; } /* @@ -256,7 +232,6 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); - ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC); /** * sram memory access @@ -342,77 +317,6 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) bfa_nw_ioc_hw_sem_release(ioc); } -/** - * Synchronized IOC failure processing routines - */ -static void -bfa_ioc_ct_sync_join(struct 
bfa_ioc *ioc) -{ - u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); - u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc); - - writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync); -} - -static void -bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc) -{ - u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); - u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) | - bfa_ioc_ct_sync_pos(ioc); - - writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync); -} - -static void -bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc) -{ - u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); - - writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync); -} - -static bool -bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc) -{ - u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); - u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); - u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32); - u32 tmp_ackd; - - if (sync_ackd == 0) - return true; - - /** - * The check below is to see whether any other PCI fn - * has reinitialized the ASIC (reset sync_ackd bits) - * and failed again while this IOC was waiting for hw - * semaphore (in bfa_iocpf_sm_semwait()). - */ - tmp_ackd = sync_ackd; - if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) && - !(sync_ackd & bfa_ioc_ct_sync_pos(ioc))) - sync_ackd |= bfa_ioc_ct_sync_pos(ioc); - - if (sync_reqd == sync_ackd) { - writel(bfa_ioc_ct_clear_sync_ackd(r32), - ioc->ioc_regs.ioc_fail_sync); - writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); - writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate); - return true; - } - - /** - * If another PCI fn reinitialized and failed again while - * this IOC was waiting for hw sem, the sync_ackd bit for - * this IOC need to be set again to allow reinitialization. - */ - if (tmp_ackd != sync_ackd) - writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync); - - return false; -} - static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode) { diff --git a/trunk/drivers/net/bna/bfi_ctreg.h b/trunk/drivers/net/bna/bfi_ctreg.h index 5130d7918660..404ea351d4a1 100644 --- a/trunk/drivers/net/bna/bfi_ctreg.h +++ b/trunk/drivers/net/bna/bfi_ctreg.h @@ -535,7 +535,6 @@ enum { #define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG #define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG #define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG -#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG #define CPE_DEPTH_Q(__n) \ (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0)) @@ -553,30 +552,22 @@ enum { (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0)) #define RME_CI_PTR_Q(__n) \ (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0)) -#define HQM_QSET_RXQ_DRBL_P0(__n) \ - (HQM_QSET0_RXQ_DRBL_P0 + (__n) * \ - (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0)) -#define HQM_QSET_TXQ_DRBL_P0(__n) \ - (HQM_QSET0_TXQ_DRBL_P0 + (__n) * \ - (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0)) -#define HQM_QSET_IB_DRBL_1_P0(__n) \ - (HQM_QSET0_IB_DRBL_1_P0 + (__n) * \ - (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0)) -#define HQM_QSET_IB_DRBL_2_P0(__n) \ - (HQM_QSET0_IB_DRBL_2_P0 + (__n) * \ - (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0)) -#define HQM_QSET_RXQ_DRBL_P1(__n) \ - (HQM_QSET0_RXQ_DRBL_P1 + (__n) * \ - (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1)) -#define HQM_QSET_TXQ_DRBL_P1(__n) \ - (HQM_QSET0_TXQ_DRBL_P1 + (__n) * \ - (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1)) -#define HQM_QSET_IB_DRBL_1_P1(__n) \ - (HQM_QSET0_IB_DRBL_1_P1 + (__n) * \ - (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1)) -#define HQM_QSET_IB_DRBL_2_P1(__n) \ - (HQM_QSET0_IB_DRBL_2_P1 + (__n) * \ - (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1)) 
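This hunk only re-wraps the HQM_QSET_* doorbell macros — the compact one-line forms follow below — so the formula is unchanged: the register for queue set n is the qset-0 register plus n times the qset0-to-qset1 stride. A minimal sketch of the addressing pattern, with hypothetical offsets:

#include <assert.h>
#include <stdint.h>

#define QSET0_RXQ_DRBL	0x1000u		/* hypothetical qset-0 offset */
#define QSET1_RXQ_DRBL	0x1080u		/* hypothetical qset-1 offset */

#define QSET_RXQ_DRBL(n) \
	(QSET0_RXQ_DRBL + (n) * (QSET1_RXQ_DRBL - QSET0_RXQ_DRBL))

int main(void)
{
	assert(QSET_RXQ_DRBL(0) == 0x1000u);
	assert(QSET_RXQ_DRBL(3) == 0x1180u);	/* base + 3 * 0x80 stride */
	return 0;
}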
+#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \ + * (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0)) +#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \ + * (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0)) +#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \ + * (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0)) +#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \ + * (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0)) +#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \ + * (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1)) +#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \ + * (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1)) +#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \ + * (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1)) +#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \ + * (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1)) #define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) #define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) diff --git a/trunk/drivers/net/bna/bna.h b/trunk/drivers/net/bna/bna.h index a287f89b0289..df6676bbc84e 100644 --- a/trunk/drivers/net/bna/bna.h +++ b/trunk/drivers/net/bna/bna.h @@ -32,6 +32,8 @@ extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX]; /* Log string size */ #define BNA_MESSAGE_SIZE 256 +#define bna_device_timer(_dev) bfa_timer_beat(&((_dev)->timer_mod)) + /* MBOX API for PORT, TX, RX */ #define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg) \ do { \ @@ -388,8 +390,8 @@ void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe); /* API for RX */ int bna_port_mtu_get(struct bna_port *port); -void bna_llport_rx_started(struct bna_llport *llport); -void bna_llport_rx_stopped(struct bna_llport *llport); +void bna_llport_admin_up(struct bna_llport *llport); +void bna_llport_admin_down(struct bna_llport *llport); /* API for BNAD */ void bna_port_enable(struct bna_port *port); diff --git a/trunk/drivers/net/bna/bna_ctrl.c b/trunk/drivers/net/bna/bna_ctrl.c index e1527472b961..07b26598546e 100644 --- a/trunk/drivers/net/bna/bna_ctrl.c +++ b/trunk/drivers/net/bna/bna_ctrl.c @@ -59,70 +59,14 @@ bna_port_cb_link_down(struct bna_port *port, int status) port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN); } -static inline int -llport_can_be_up(struct bna_llport *llport) -{ - int ready = 0; - if (llport->type == BNA_PORT_T_REGULAR) - ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) && - (llport->flags & BNA_LLPORT_F_RX_STARTED) && - (llport->flags & BNA_LLPORT_F_PORT_ENABLED)); - else - ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) && - (llport->flags & BNA_LLPORT_F_RX_STARTED) && - !(llport->flags & BNA_LLPORT_F_PORT_ENABLED)); - return ready; -} - -#define llport_is_up llport_can_be_up - -enum bna_llport_event { - LLPORT_E_START = 1, - LLPORT_E_STOP = 2, - LLPORT_E_FAIL = 3, - LLPORT_E_UP = 4, - LLPORT_E_DOWN = 5, - LLPORT_E_FWRESP_UP_OK = 6, - LLPORT_E_FWRESP_UP_FAIL = 7, - LLPORT_E_FWRESP_DOWN = 8 -}; - -static void -bna_llport_cb_port_enabled(struct bna_llport *llport) -{ - llport->flags |= BNA_LLPORT_F_PORT_ENABLED; - - if (llport_can_be_up(llport)) - bfa_fsm_send_event(llport, LLPORT_E_UP); -} - -static void -bna_llport_cb_port_disabled(struct bna_llport *llport) -{ - int llport_up = llport_is_up(llport); - - llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED; - - if (llport_up) - bfa_fsm_send_event(llport, LLPORT_E_DOWN); -} - /** * MBOX */ static int bna_is_aen(u8 msg_id) { - switch (msg_id) { - case 
BFI_LL_I2H_LINK_DOWN_AEN: - case BFI_LL_I2H_LINK_UP_AEN: - case BFI_LL_I2H_PORT_ENABLE_AEN: - case BFI_LL_I2H_PORT_DISABLE_AEN: - return 1; - - default: - return 0; - } + return msg_id == BFI_LL_I2H_LINK_DOWN_AEN || + msg_id == BFI_LL_I2H_LINK_UP_AEN; } static void @@ -137,12 +81,6 @@ bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg) case BFI_LL_I2H_LINK_DOWN_AEN: bna_port_cb_link_down(&bna->port, aen->reason); break; - case BFI_LL_I2H_PORT_ENABLE_AEN: - bna_llport_cb_port_enabled(&bna->port.llport); - break; - case BFI_LL_I2H_PORT_DISABLE_AEN: - bna_llport_cb_port_disabled(&bna->port.llport); - break; default: break; } @@ -313,6 +251,16 @@ static void bna_llport_start(struct bna_llport *llport); static void bna_llport_stop(struct bna_llport *llport); static void bna_llport_fail(struct bna_llport *llport); +enum bna_llport_event { + LLPORT_E_START = 1, + LLPORT_E_STOP = 2, + LLPORT_E_FAIL = 3, + LLPORT_E_UP = 4, + LLPORT_E_DOWN = 5, + LLPORT_E_FWRESP_UP = 6, + LLPORT_E_FWRESP_DOWN = 7 +}; + enum bna_llport_state { BNA_LLPORT_STOPPED = 1, BNA_LLPORT_DOWN = 2, @@ -372,7 +320,7 @@ bna_llport_sm_stopped(struct bna_llport *llport, /* No-op */ break; - case LLPORT_E_FWRESP_UP_OK: + case LLPORT_E_FWRESP_UP: case LLPORT_E_FWRESP_DOWN: /** * These events are received due to flushing of mbox when @@ -418,7 +366,6 @@ bna_llport_sm_down(struct bna_llport *llport, static void bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport) { - BUG_ON(!llport_can_be_up(llport)); /** * NOTE: Do not call bna_fw_llport_up() here. That will over step * mbox due to down_resp_wait -> up_resp_wait transition on event @@ -443,14 +390,10 @@ bna_llport_sm_up_resp_wait(struct bna_llport *llport, bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait); break; - case LLPORT_E_FWRESP_UP_OK: + case LLPORT_E_FWRESP_UP: bfa_fsm_set_state(llport, bna_llport_sm_up); break; - case LLPORT_E_FWRESP_UP_FAIL: - bfa_fsm_set_state(llport, bna_llport_sm_down); - break; - case LLPORT_E_FWRESP_DOWN: /* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */ bna_fw_llport_up(llport); @@ -488,12 +431,11 @@ bna_llport_sm_down_resp_wait(struct bna_llport *llport, bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait); break; - case LLPORT_E_FWRESP_UP_OK: + case LLPORT_E_FWRESP_UP: /* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */ bna_fw_llport_down(llport); break; - case LLPORT_E_FWRESP_UP_FAIL: case LLPORT_E_FWRESP_DOWN: bfa_fsm_set_state(llport, bna_llport_sm_down); break; @@ -554,12 +496,11 @@ bna_llport_sm_last_resp_wait(struct bna_llport *llport, /* No-op */ break; - case LLPORT_E_FWRESP_UP_OK: + case LLPORT_E_FWRESP_UP: /* up_resp_wait->last_resp_wait transition on LLPORT_T_STOP */ bna_fw_llport_down(llport); break; - case LLPORT_E_FWRESP_UP_FAIL: case LLPORT_E_FWRESP_DOWN: bfa_fsm_set_state(llport, bna_llport_sm_stopped); break; @@ -600,14 +541,7 @@ bna_fw_cb_llport_up(void *arg, int status) struct bna_llport *llport = (struct bna_llport *)arg; bfa_q_qe_init(&llport->mbox_qe.qe); - if (status == BFI_LL_CMD_FAIL) { - if (llport->type == BNA_PORT_T_REGULAR) - llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED; - else - llport->flags &= ~BNA_LLPORT_F_ADMIN_UP; - bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_FAIL); - } else - bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_OK); + bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP); } static void @@ -654,14 +588,13 @@ bna_port_cb_llport_stopped(struct bna_port *port, static void bna_llport_init(struct bna_llport *llport, struct bna *bna) { - llport->flags |= 
BNA_LLPORT_F_ADMIN_UP; - llport->flags |= BNA_LLPORT_F_PORT_ENABLED; + llport->flags |= BNA_LLPORT_F_ENABLED; llport->type = BNA_PORT_T_REGULAR; llport->bna = bna; llport->link_status = BNA_LINK_DOWN; - llport->rx_started_count = 0; + llport->admin_up_count = 0; llport->stop_cbfn = NULL; @@ -673,8 +606,7 @@ bna_llport_init(struct bna_llport *llport, struct bna *bna) static void bna_llport_uninit(struct bna_llport *llport) { - llport->flags &= ~BNA_LLPORT_F_ADMIN_UP; - llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED; + llport->flags &= ~BNA_LLPORT_F_ENABLED; llport->bna = NULL; } @@ -696,8 +628,6 @@ bna_llport_stop(struct bna_llport *llport) static void bna_llport_fail(struct bna_llport *llport) { - /* Reset the physical port status to enabled */ - llport->flags |= BNA_LLPORT_F_PORT_ENABLED; bfa_fsm_send_event(llport, LLPORT_E_FAIL); } @@ -708,31 +638,25 @@ bna_llport_state_get(struct bna_llport *llport) } void -bna_llport_rx_started(struct bna_llport *llport) +bna_llport_admin_up(struct bna_llport *llport) { - llport->rx_started_count++; + llport->admin_up_count++; - if (llport->rx_started_count == 1) { - - llport->flags |= BNA_LLPORT_F_RX_STARTED; - - if (llport_can_be_up(llport)) + if (llport->admin_up_count == 1) { + llport->flags |= BNA_LLPORT_F_RX_ENABLED; + if (llport->flags & BNA_LLPORT_F_ENABLED) bfa_fsm_send_event(llport, LLPORT_E_UP); } } void -bna_llport_rx_stopped(struct bna_llport *llport) +bna_llport_admin_down(struct bna_llport *llport) { - int llport_up = llport_is_up(llport); - - llport->rx_started_count--; + llport->admin_up_count--; - if (llport->rx_started_count == 0) { - - llport->flags &= ~BNA_LLPORT_F_RX_STARTED; - - if (llport_up) + if (llport->admin_up_count == 0) { + llport->flags &= ~BNA_LLPORT_F_RX_ENABLED; + if (llport->flags & BNA_LLPORT_F_ENABLED) bfa_fsm_send_event(llport, LLPORT_E_DOWN); } } @@ -2132,6 +2056,37 @@ rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status) bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe); } +static void +__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status) +{ + struct bna_rx_fndb_ram *rx_fndb_ram; + u32 ctrl_flags; + int i; + + rx_fndb_ram = (struct bna_rx_fndb_ram *) + BNA_GET_MEM_BASE_ADDR(rxf->rx->bna->pcidev.pci_bar_kva, + RX_FNDB_RAM_BASE_OFFSET); + + for (i = 0; i < BFI_MAX_RXF; i++) { + if (status == BNA_STATUS_T_ENABLED) { + if (i == rxf->rxf_id) + continue; + + ctrl_flags = + readl(&rx_fndb_ram[i].control_flags); + ctrl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE; + writel(ctrl_flags, + &rx_fndb_ram[i].control_flags); + } else { + ctrl_flags = + readl(&rx_fndb_ram[i].control_flags); + ctrl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE; + writel(ctrl_flags, + &rx_fndb_ram[i].control_flags); + } + } +} + int rxf_process_packet_filter_ucast(struct bna_rxf *rxf) { @@ -2197,6 +2152,46 @@ rxf_process_packet_filter_promisc(struct bna_rxf *rxf) return 0; } +int +rxf_process_packet_filter_default(struct bna_rxf *rxf) +{ + struct bna *bna = rxf->rx->bna; + + /* Enable/disable default mode */ + if (is_default_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* move default configuration from pending -> active */ + default_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active |= BNA_RXMODE_DEFAULT; + + /* Disable VLAN filter to allow all VLANs */ + __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED); + /* Redirect all other RxF vlan filtering to this one */ + __rxf_default_function_config(rxf, BNA_STATUS_T_ENABLED); + rxf_fltr_mbox_cmd(rxf, 
BFI_LL_H2I_RXF_DEFAULT_SET_REQ, + BNA_STATUS_T_ENABLED); + return 1; + } else if (is_default_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* move default configuration from pending -> active */ + default_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT; + bna->rxf_default_id = BFI_MAX_RXF; + + /* Revert VLAN filter */ + __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status); + /* Stop RxF vlan filter table redirection */ + __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED); + rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ, + BNA_STATUS_T_DISABLED); + return 1; + } + + return 0; +} + int rxf_process_packet_filter_allmulti(struct bna_rxf *rxf) { @@ -2293,6 +2288,48 @@ rxf_clear_packet_filter_promisc(struct bna_rxf *rxf) return 0; } +int +rxf_clear_packet_filter_default(struct bna_rxf *rxf) +{ + struct bna *bna = rxf->rx->bna; + + /* 8. Execute pending default mode disable command */ + if (is_default_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* move default configuration from pending -> active */ + default_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT; + bna->rxf_default_id = BFI_MAX_RXF; + + /* Revert VLAN filter */ + __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status); + /* Stop RxF vlan filter table redirection */ + __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED); + rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ, + BNA_STATUS_T_DISABLED); + return 1; + } + + /* 9. Clear active default mode; move it to pending enable */ + if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) { + /* move default configuration from active -> pending */ + default_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT; + + /* Revert VLAN filter */ + __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status); + /* Stop RxF vlan filter table redirection */ + __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED); + rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ, + BNA_STATUS_T_DISABLED); + return 1; + } + + return 0; +} + int rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf) { @@ -2367,6 +2404,28 @@ rxf_reset_packet_filter_promisc(struct bna_rxf *rxf) } +void +rxf_reset_packet_filter_default(struct bna_rxf *rxf) +{ + struct bna *bna = rxf->rx->bna; + + /* 8. Clear pending default mode disable */ + if (is_default_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + default_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT; + bna->rxf_default_id = BFI_MAX_RXF; + } + + /* 9. Move default mode config from active -> pending */ + if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) { + default_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT; + } +} + void rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf) { @@ -2456,6 +2515,76 @@ rxf_promisc_disable(struct bna_rxf *rxf) return ret; } +/** + * Should only be called by bna_rxf_mode_set. + * Helps deciding if h/w configuration is needed or not. 
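+ * Only one RxF in the system may own default mode at a time; the
+ * current owner is tracked in bna->rxf_default_id, and BFI_MAX_RXF
+ * there means "no owner".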
+ * Returns: + * 0 = no h/w change + * 1 = need h/w change + */ +static int +rxf_default_enable(struct bna_rxf *rxf) +{ + struct bna *bna = rxf->rx->bna; + int ret = 0; + + /* There can not be any pending disable command */ + + /* Do nothing if pending enable or already enabled */ + if (is_default_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask) || + (rxf->rxmode_active & BNA_RXMODE_DEFAULT)) { + /* Schedule enable */ + } else { + /* Default mode should not be active in the system */ + default_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + bna->rxf_default_id = rxf->rxf_id; + ret = 1; + } + + return ret; +} + +/** + * Should only be called by bna_rxf_mode_set. + * Helps deciding if h/w configuration is needed or not. + * Returns: + * 0 = no h/w change + * 1 = need h/w change + */ +static int +rxf_default_disable(struct bna_rxf *rxf) +{ + struct bna *bna = rxf->rx->bna; + int ret = 0; + + /* There can not be any pending disable */ + + /* Turn off pending enable command , if any */ + if (is_default_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* Promisc mode should not be active */ + /* system default state should be pending */ + default_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + /* Remove the default state from the system */ + bna->rxf_default_id = BFI_MAX_RXF; + + /* Schedule disable */ + } else if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) { + /* Default mode should be active in the system */ + default_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + ret = 1; + + /* Do nothing if already disabled */ + } else { + } + + return ret; +} + /** * Should only be called by bna_rxf_mode_set. * Helps deciding if h/w configuration is needed or not. @@ -2525,13 +2654,38 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, struct bna_rxf *rxf = &rx->rxf; int need_hw_config = 0; - /* Process the commands */ + /* Error checks */ if (is_promisc_enable(new_mode, bitmask)) { /* If promisc mode is already enabled elsewhere in the system */ if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) && (rx->bna->rxf_promisc_id != rxf->rxf_id)) goto err_return; + + /* If default mode is already enabled in the system */ + if (rx->bna->rxf_default_id != BFI_MAX_RXF) + goto err_return; + + /* Trying to enable promiscuous and default mode together */ + if (is_default_enable(new_mode, bitmask)) + goto err_return; + } + + if (is_default_enable(new_mode, bitmask)) { + /* If default mode is already enabled elsewhere in the system */ + if ((rx->bna->rxf_default_id != BFI_MAX_RXF) && + (rx->bna->rxf_default_id != rxf->rxf_id)) { + goto err_return; + } + + /* If promiscuous mode is already enabled in the system */ + if (rx->bna->rxf_promisc_id != BFI_MAX_RXF) + goto err_return; + } + + /* Process the commands */ + + if (is_promisc_enable(new_mode, bitmask)) { if (rxf_promisc_enable(rxf)) need_hw_config = 1; } else if (is_promisc_disable(new_mode, bitmask)) { @@ -2539,6 +2693,14 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, need_hw_config = 1; } + if (is_default_enable(new_mode, bitmask)) { + if (rxf_default_enable(rxf)) + need_hw_config = 1; + } else if (is_default_disable(new_mode, bitmask)) { + if (rxf_default_disable(rxf)) + need_hw_config = 1; + } + if (is_allmulti_enable(new_mode, bitmask)) { if (rxf_allmulti_enable(rxf)) need_hw_config = 1; @@ -2964,6 +3126,7 @@ bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev, bna_mcam_mod_init(&bna->mcam_mod, bna, res_info); + bna->rxf_default_id = BFI_MAX_RXF; 
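Both ownership trackers are seeded with the same sentinel here: BFI_MAX_RXF (one past the largest valid RxF id) stands for "no owner", and rxf_default_enable()/rxf_promisc_enable() refuse to claim a mode that another function already owns. A minimal sketch of that single-owner pattern, with the sentinel value assumed for illustration:

#include <stdbool.h>
#include <stdio.h>

#define BFI_MAX_RXF 64			/* assumed value, illustration only */

static int rxf_default_id = BFI_MAX_RXF;	/* "no owner" */

static bool claim_default(int rxf_id)
{
	if (rxf_default_id != BFI_MAX_RXF && rxf_default_id != rxf_id)
		return false;		/* another RxF owns default mode */
	rxf_default_id = rxf_id;
	return true;
}

static void release_default(void)
{
	rxf_default_id = BFI_MAX_RXF;
}

int main(void)
{
	printf("%d\n", claim_default(3));	/* 1: claimed */
	printf("%d\n", claim_default(5));	/* 0: refused, fn 3 owns it */
	release_default();
	printf("%d\n", claim_default(5));	/* 1: claimed after release */
	return 0;
}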
bna->rxf_promisc_id = BFI_MAX_RXF; /* Mbox q element for posting stat request to f/w */ diff --git a/trunk/drivers/net/bna/bna_txrx.c b/trunk/drivers/net/bna/bna_txrx.c index 58c7664040dc..ad93fdb0f427 100644 --- a/trunk/drivers/net/bna/bna_txrx.c +++ b/trunk/drivers/net/bna/bna_txrx.c @@ -1226,7 +1226,8 @@ rxf_process_packet_filter_vlan(struct bna_rxf *rxf) /* Apply the VLAN filter */ if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) { rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING; - if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC)) + if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) && + !(rxf->rxmode_active & BNA_RXMODE_DEFAULT)) __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status); } @@ -1275,6 +1276,9 @@ rxf_process_packet_filter(struct bna_rxf *rxf) if (rxf_process_packet_filter_promisc(rxf)) return 1; + if (rxf_process_packet_filter_default(rxf)) + return 1; + if (rxf_process_packet_filter_allmulti(rxf)) return 1; @@ -1336,6 +1340,9 @@ rxf_clear_packet_filter(struct bna_rxf *rxf) if (rxf_clear_packet_filter_promisc(rxf)) return 1; + if (rxf_clear_packet_filter_default(rxf)) + return 1; + if (rxf_clear_packet_filter_allmulti(rxf)) return 1; @@ -1382,6 +1389,8 @@ rxf_reset_packet_filter(struct bna_rxf *rxf) rxf_reset_packet_filter_promisc(rxf); + rxf_reset_packet_filter_default(rxf); + rxf_reset_packet_filter_allmulti(rxf); } @@ -1432,16 +1441,12 @@ bna_rxf_init(struct bna_rxf *rxf, memset(rxf->vlan_filter_table, 0, (sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32))); - /* Set up VLAN 0 for pure priority tagged packets */ - rxf->vlan_filter_table[0] |= 1; - bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); } static void bna_rxf_uninit(struct bna_rxf *rxf) { - struct bna *bna = rxf->rx->bna; struct bna_mac *mac; bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment); @@ -1468,27 +1473,6 @@ bna_rxf_uninit(struct bna_rxf *rxf) bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); } - /* Turn off pending promisc mode */ - if (is_promisc_enable(rxf->rxmode_pending, - rxf->rxmode_pending_bitmask)) { - /* system promisc state should be pending */ - BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id)); - promisc_inactive(rxf->rxmode_pending, - rxf->rxmode_pending_bitmask); - bna->rxf_promisc_id = BFI_MAX_RXF; - } - /* Promisc mode should not be active */ - BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC); - - /* Turn off pending all-multi mode */ - if (is_allmulti_enable(rxf->rxmode_pending, - rxf->rxmode_pending_bitmask)) { - allmulti_inactive(rxf->rxmode_pending, - rxf->rxmode_pending_bitmask); - } - /* Allmulti mode should not be active */ - BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI); - rxf->rx = NULL; } @@ -1963,7 +1947,7 @@ bna_rx_sm_started_entry(struct bna_rx *rx) bna_ib_ack(&rxp->cq.ib->door_bell, 0); } - bna_llport_rx_started(&rx->bna->port.llport); + bna_llport_admin_up(&rx->bna->port.llport); } void @@ -1971,13 +1955,13 @@ bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event) { switch (event) { case RX_E_FAIL: - bna_llport_rx_stopped(&rx->bna->port.llport); + bna_llport_admin_down(&rx->bna->port.llport); bfa_fsm_set_state(rx, bna_rx_sm_stopped); rx_ib_fail(rx); bna_rxf_fail(&rx->rxf); break; case RX_E_STOP: - bna_llport_rx_stopped(&rx->bna->port.llport); + bna_llport_admin_down(&rx->bna->port.llport); bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); break; default: @@ -3389,7 +3373,7 @@ __bna_txq_start(struct bna_tx *tx, struct bna_txq *txq) txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE; txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) | - (txq->priority & 0x7)); + 
(txq->priority & 0x3)); txq_cfg.wvc_n_cquota_n_rquota = ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) | (BFI_TX_MAX_WRR_QUOTA & 0xfff)); diff --git a/trunk/drivers/net/bna/bna_types.h b/trunk/drivers/net/bna/bna_types.h index b9c134f7ad31..6877310f6ef4 100644 --- a/trunk/drivers/net/bna/bna_types.h +++ b/trunk/drivers/net/bna/bna_types.h @@ -165,7 +165,8 @@ enum bna_rxp_type { enum bna_rxmode { BNA_RXMODE_PROMISC = 1, - BNA_RXMODE_ALLMULTI = 2 + BNA_RXMODE_DEFAULT = 2, + BNA_RXMODE_ALLMULTI = 4 }; enum bna_rx_event { @@ -248,9 +249,8 @@ enum bna_link_status { }; enum bna_llport_flags { - BNA_LLPORT_F_ADMIN_UP = 1, - BNA_LLPORT_F_PORT_ENABLED = 2, - BNA_LLPORT_F_RX_STARTED = 4 + BNA_LLPORT_F_ENABLED = 1, + BNA_LLPORT_F_RX_ENABLED = 2 }; enum bna_port_flags { @@ -405,7 +405,7 @@ struct bna_llport { enum bna_link_status link_status; - int rx_started_count; + int admin_up_count; void (*stop_cbfn)(struct bna_port *, enum bna_cb_status); @@ -1117,6 +1117,7 @@ struct bna { struct bna_rit_mod rit_mod; + int rxf_default_id; int rxf_promisc_id; struct bna_mbox_qe mbox_qe; diff --git a/trunk/drivers/net/bna/bnad.c b/trunk/drivers/net/bna/bnad.c index fad912656fe4..7e839b9cec22 100644 --- a/trunk/drivers/net/bna/bnad.c +++ b/trunk/drivers/net/bna/bnad.c @@ -70,8 +70,6 @@ do { \ (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \ } while (0) -#define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */ - /* * Reinitialize completions in CQ, once Rx is taken down */ @@ -109,7 +107,7 @@ static void bnad_free_all_txbufs(struct bnad *bnad, struct bna_tcb *tcb) { - u32 unmap_cons; + u16 unmap_cons; struct bnad_unmap_q *unmap_q = tcb->unmap_q; struct bnad_skb_unmap *unmap_array; struct sk_buff *skb = NULL; @@ -132,9 +130,7 @@ bnad_free_all_txbufs(struct bnad *bnad, PCI_DMA_TODEVICE); pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); - if (++unmap_cons >= unmap_q->q_depth) - break; - + unmap_cons++; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { pci_unmap_page(bnad->pcidev, pci_unmap_addr(&unmap_array[unmap_cons], @@ -143,8 +139,7 @@ bnad_free_all_txbufs(struct bnad *bnad, PCI_DMA_TODEVICE); pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); - if (++unmap_cons >= unmap_q->q_depth) - break; + unmap_cons++; } dev_kfree_skb_any(skb); } @@ -172,11 +167,11 @@ bnad_free_txbufs(struct bnad *bnad, /* * Just return if TX is stopped. This check is useful * when bnad_free_txbufs() runs out of a tasklet scheduled - * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit + * before bnad_cb_tx_cleanup() cleared BNAD_RF_TX_STARTED bit * but this routine runs actually after the cleanup has been * executed. 
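 * (That is, the tasklet may still run after the cleanup path has
 * stopped the queue, so the test below bails out in that case.)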
*/ - if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) + if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) return 0; updated_hw_cons = *(tcb->hw_consumer_index); @@ -244,7 +239,7 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr) { struct bnad *bnad = (struct bnad *)bnad_ptr; struct bna_tcb *tcb; - u32 acked = 0; + u32 acked; int i, j; for (i = 0; i < bnad->num_tx; i++) { @@ -257,26 +252,10 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr) (!test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))) { acked = bnad_free_txbufs(bnad, tcb); - if (likely(test_bit(BNAD_TXQ_TX_STARTED, - &tcb->flags))) - bna_ib_ack(tcb->i_dbell, acked); + bna_ib_ack(tcb->i_dbell, acked); smp_mb__before_clear_bit(); clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); } - if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, - &tcb->flags))) - continue; - if (netif_queue_stopped(bnad->netdev)) { - if (acked && netif_carrier_ok(bnad->netdev) && - BNA_QE_FREE_CNT(tcb, tcb->q_depth) >= - BNAD_NETIF_WAKE_THRESHOLD) { - netif_wake_queue(bnad->netdev); - /* TODO */ - /* Counters for individual TxQs? */ - BNAD_UPDATE_CTR(bnad, - netif_queue_wakeup); - } - } } } } @@ -285,7 +264,7 @@ static u32 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb) { struct net_device *netdev = bnad->netdev; - u32 sent = 0; + u32 sent; if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) return 0; @@ -296,15 +275,12 @@ bnad_tx(struct bnad *bnad, struct bna_tcb *tcb) netif_carrier_ok(netdev) && BNA_QE_FREE_CNT(tcb, tcb->q_depth) >= BNAD_NETIF_WAKE_THRESHOLD) { - if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) { - netif_wake_queue(netdev); - BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); - } + netif_wake_queue(netdev); + BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); } - } - - if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) bna_ib_ack(tcb->i_dbell, sent); + } else + bna_ib_ack(tcb->i_dbell, 0); smp_mb__before_clear_bit(); clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); @@ -337,24 +313,25 @@ bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb) } static void -bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) +bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) { struct bnad_unmap_q *unmap_q; struct sk_buff *skb; - int unmap_cons; unmap_q = rcb->unmap_q; - for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) { - skb = unmap_q->unmap_array[unmap_cons].skb; - if (!skb) - continue; - unmap_q->unmap_array[unmap_cons].skb = NULL; + while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) { + skb = unmap_q->unmap_array[unmap_q->consumer_index].skb; + BUG_ON(!(skb)); + unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL; pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q-> - unmap_array[unmap_cons], - dma_addr), rcb->rxq->buffer_size, - PCI_DMA_FROMDEVICE); + unmap_array[unmap_q->consumer_index], + dma_addr), rcb->rxq->buffer_size + + NET_IP_ALIGN, PCI_DMA_FROMDEVICE); dev_kfree_skb(skb); + BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth); + BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth); } + bnad_reset_rcb(bnad, rcb); } @@ -408,11 +385,43 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) unmap_q->producer_index = unmap_prod; rcb->producer_index = unmap_prod; smp_mb(); - if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags))) - bna_rxq_prod_indx_doorbell(rcb); + bna_rxq_prod_indx_doorbell(rcb); } } +/* + * Locking is required in the enable path + * because it is called from a napi poll + * context, where the bna_lock is not held + * unlike the IRQ context. 
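+ * Hence the spin_lock_irqsave()/spin_unlock_irqrestore() pair
+ * below, which protects the coalescing-timer and doorbell updates
+ * against the IRQ path.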
+ */ +static void +bnad_enable_txrx_irqs(struct bnad *bnad) +{ + struct bna_tcb *tcb; + struct bna_ccb *ccb; + int i, j; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + for (i = 0; i < bnad->num_tx; i++) { + for (j = 0; j < bnad->num_txq_per_tx; j++) { + tcb = bnad->tx_info[i].tcb[j]; + bna_ib_coalescing_timer_set(tcb->i_dbell, + tcb->txq->ib->ib_config.coalescing_timeo); + bna_ib_ack(tcb->i_dbell, 0); + } + } + + for (i = 0; i < bnad->num_rx; i++) { + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + ccb = bnad->rx_info[i].rx_ctrl[j].ccb; + bnad_enable_rx_irq_unsafe(ccb); + } + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + static inline void bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb) { @@ -439,9 +448,6 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget) u32 qid0 = ccb->rcb[0]->rxq->rxq_id; struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; - if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) - return 0; - prefetch(bnad->netdev); BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl, wi_range); @@ -538,15 +544,12 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget) BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth); if (likely(ccb)) { - if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) - bna_ib_ack(ccb->i_dbell, packets); + bna_ib_ack(ccb->i_dbell, packets); bnad_refill_rxq(bnad, ccb->rcb[0]); if (ccb->rcb[1]) bnad_refill_rxq(bnad, ccb->rcb[1]); - } else { - if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) - bna_ib_ack(ccb->i_dbell, 0); - } + } else + bna_ib_ack(ccb->i_dbell, 0); return packets; } @@ -554,9 +557,6 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget) static void bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb) { - if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) - return; - bna_ib_coalescing_timer_set(ccb->i_dbell, 0); bna_ib_ack(ccb->i_dbell, 0); } @@ -566,8 +566,7 @@ bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb) { unsigned long flags; - /* Because of polling context */ - spin_lock_irqsave(&bnad->bna_lock, flags); + spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */ bnad_enable_rx_irq_unsafe(ccb); spin_unlock_irqrestore(&bnad->bna_lock, flags); } @@ -576,11 +575,9 @@ static void bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb) { struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); - struct napi_struct *napi = &rx_ctrl->napi; - - if (likely(napi_schedule_prep(napi))) { + if (likely(napi_schedule_prep((&rx_ctrl->napi)))) { bnad_disable_rx_irq(bnad, ccb); - __napi_schedule(napi); + __napi_schedule((&rx_ctrl->napi)); } BNAD_UPDATE_CTR(bnad, netif_rx_schedule); } @@ -605,11 +602,12 @@ bnad_msix_mbox_handler(int irq, void *data) { u32 intr_status; unsigned long flags; - struct bnad *bnad = (struct bnad *)data; + struct net_device *netdev = data; + struct bnad *bnad; - if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) - return IRQ_HANDLED; + bnad = netdev_priv(netdev); + /* BNA_ISR_GET(bnad); Inc Ref count */ spin_lock_irqsave(&bnad->bna_lock, flags); bna_intr_status_get(&bnad->bna, intr_status); @@ -619,6 +617,7 @@ bnad_msix_mbox_handler(int irq, void *data) spin_unlock_irqrestore(&bnad->bna_lock, flags); + /* BNAD_ISR_PUT(bnad); Dec Ref count */ return IRQ_HANDLED; } @@ -628,7 +627,8 @@ bnad_isr(int irq, void *data) int i, j; u32 intr_status; unsigned long flags; - struct bnad *bnad = (struct bnad *)data; + struct net_device *netdev = data; + 
struct bnad *bnad = netdev_priv(netdev); struct bnad_rx_info *rx_info; struct bnad_rx_ctrl *rx_ctrl; @@ -642,21 +642,16 @@ bnad_isr(int irq, void *data) spin_lock_irqsave(&bnad->bna_lock, flags); - if (BNA_IS_MBOX_ERR_INTR(intr_status)) + if (BNA_IS_MBOX_ERR_INTR(intr_status)) { bna_mbox_handler(&bnad->bna, intr_status); - + if (!BNA_IS_INTX_DATA_INTR(intr_status)) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto done; + } + } spin_unlock_irqrestore(&bnad->bna_lock, flags); - if (!BNA_IS_INTX_DATA_INTR(intr_status)) - return IRQ_HANDLED; - /* Process data interrupts */ - /* Tx processing */ - for (i = 0; i < bnad->num_tx; i++) { - for (j = 0; j < bnad->num_txq_per_tx; j++) - bnad_tx(bnad, bnad->tx_info[i].tcb[j]); - } - /* Rx processing */ for (i = 0; i < bnad->num_rx; i++) { rx_info = &bnad->rx_info[i]; if (!rx_info->rx) @@ -668,6 +663,7 @@ bnad_isr(int irq, void *data) rx_ctrl->ccb); } } +done: return IRQ_HANDLED; } @@ -678,7 +674,11 @@ bnad_isr(int irq, void *data) static void bnad_enable_mbox_irq(struct bnad *bnad) { - clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); + int irq = BNAD_GET_MBOX_IRQ(bnad); + + if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)) + if (bnad->cfg_flags & BNAD_CF_MSIX) + enable_irq(irq); BNAD_UPDATE_CTR(bnad, mbox_intr_enabled); } @@ -690,19 +690,14 @@ bnad_enable_mbox_irq(struct bnad *bnad) static void bnad_disable_mbox_irq(struct bnad *bnad) { - set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); + int irq = BNAD_GET_MBOX_IRQ(bnad); - BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); -} -static void -bnad_set_netdev_perm_addr(struct bnad *bnad) -{ - struct net_device *netdev = bnad->netdev; + if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)) + if (bnad->cfg_flags & BNAD_CF_MSIX) + disable_irq_nosync(irq); - memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len); - if (is_zero_ether_addr(netdev->dev_addr)) - memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len); + BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); } /* Control Path Handlers */ @@ -760,14 +755,11 @@ bnad_cb_port_link_status(struct bnad *bnad, if (link_up) { if (!netif_carrier_ok(bnad->netdev)) { - struct bna_tcb *tcb = bnad->tx_info[0].tcb[0]; - if (!tcb) - return; pr_warn("bna: %s link up\n", bnad->netdev->name); netif_carrier_on(bnad->netdev); BNAD_UPDATE_CTR(bnad, link_toggle); - if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) { + if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) { /* Force an immediate Transmit Schedule */ pr_info("bna: %s TX_STARTED\n", bnad->netdev->name); @@ -815,18 +807,6 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb) { struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tcb->txq->tx->priv; - struct bnad_unmap_q *unmap_q = tcb->unmap_q; - - while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) - cpu_relax(); - - bnad_free_all_txbufs(bnad, tcb); - - unmap_q->producer_index = 0; - unmap_q->consumer_index = 0; - - smp_mb__before_clear_bit(); - clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); tx_info->tcb[tcb->id] = NULL; } @@ -841,12 +821,6 @@ bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb) unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH; } -static void -bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb) -{ - bnad_free_all_rxbufs(bnad, rcb); -} - static void bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) { @@ -875,7 +849,7 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb) if (tx_info != &bnad->tx_info[0]) return; - clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); + 
clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags); netif_stop_queue(bnad->netdev); pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name); } @@ -883,36 +857,9 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb) static void bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb) { - struct bnad_unmap_q *unmap_q = tcb->unmap_q; - - if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) + if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) return; - clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags); - - while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) - cpu_relax(); - - bnad_free_all_txbufs(bnad, tcb); - - unmap_q->producer_index = 0; - unmap_q->consumer_index = 0; - - smp_mb__before_clear_bit(); - clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); - - /* - * Workaround for first device enable failure & we - * get a 0 MAC address. We try to get the MAC address - * again here. - */ - if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) { - bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr); - bnad_set_netdev_perm_addr(bnad); - } - - set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); - if (netif_carrier_ok(bnad->netdev)) { pr_info("bna: %s TX_STARTED\n", bnad->netdev->name); netif_wake_queue(bnad->netdev); @@ -923,22 +870,40 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb) static void bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb) { - /* Delay only once for the whole Tx Path Shutdown */ - if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags)) - mdelay(BNAD_TXRX_SYNC_MDELAY); + struct bnad_unmap_q *unmap_q; + + if (!tcb || (!tcb->unmap_q)) + return; + + unmap_q = tcb->unmap_q; + if (!unmap_q->unmap_array) + return; + + if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) + return; + + bnad_free_all_txbufs(bnad, tcb); + + unmap_q->producer_index = 0; + unmap_q->consumer_index = 0; + + smp_mb__before_clear_bit(); + clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); } static void bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_ccb *ccb) { + bnad_cq_cmpl_init(bnad, ccb); + + bnad_free_rxbufs(bnad, ccb->rcb[0]); clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags); - if (ccb->rcb[1]) + if (ccb->rcb[1]) { + bnad_free_rxbufs(bnad, ccb->rcb[1]); clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); - - if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags)) - mdelay(BNAD_TXRX_SYNC_MDELAY); + } } static void @@ -946,13 +911,6 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb) { struct bnad_unmap_q *unmap_q = rcb->unmap_q; - clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags); - - if (rcb == rcb->cq->ccb->rcb[0]) - bnad_cq_cmpl_init(bnad, rcb->cq->ccb); - - bnad_free_all_rxbufs(bnad, rcb); - set_bit(BNAD_RXQ_STARTED, &rcb->flags); /* Now allocate & post buffers for this RCB */ @@ -1089,7 +1047,7 @@ bnad_mbox_irq_free(struct bnad *bnad, spin_unlock_irqrestore(&bnad->bna_lock, flags); irq = BNAD_GET_MBOX_IRQ(bnad); - free_irq(irq, bnad); + free_irq(irq, bnad->netdev); kfree(intr_info->idl); } @@ -1103,7 +1061,7 @@ static int bnad_mbox_irq_alloc(struct bnad *bnad, struct bna_intr_info *intr_info) { - int err = 0; + int err; unsigned long flags; u32 irq; irq_handler_t irq_handler; @@ -1138,17 +1096,22 @@ bnad_mbox_irq_alloc(struct bnad *bnad, */ set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); - BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); - err = request_irq(irq, irq_handler, flags, - bnad->mbox_irq_name, bnad); + bnad->mbox_irq_name, bnad->netdev); if (err) { kfree(intr_info->idl); intr_info->idl = NULL; + return err; } - return err; + 
spin_lock_irqsave(&bnad->bna_lock, flags); + + if (bnad->cfg_flags & BNAD_CF_MSIX) + disable_irq_nosync(irq); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return 0; } static void @@ -1425,24 +1388,13 @@ bnad_ioc_hb_check(unsigned long data) } static void -bnad_iocpf_timeout(unsigned long data) -{ - struct bnad *bnad = (struct bnad *)data; - unsigned long flags; - - spin_lock_irqsave(&bnad->bna_lock, flags); - bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc); - spin_unlock_irqrestore(&bnad->bna_lock, flags); -} - -static void -bnad_iocpf_sem_timeout(unsigned long data) +bnad_ioc_sem_timeout(unsigned long data) { struct bnad *bnad = (struct bnad *)data; unsigned long flags; spin_lock_irqsave(&bnad->bna_lock, flags); - bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc); + bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc); spin_unlock_irqrestore(&bnad->bna_lock, flags); } @@ -1603,19 +1555,62 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget) return rcvd; } +static int +bnad_napi_poll_txrx(struct napi_struct *napi, int budget) +{ + struct bnad_rx_ctrl *rx_ctrl = + container_of(napi, struct bnad_rx_ctrl, napi); + struct bna_ccb *ccb; + struct bnad *bnad; + int rcvd = 0; + int i, j; + + ccb = rx_ctrl->ccb; + + bnad = ccb->bnad; + + if (!netif_carrier_ok(bnad->netdev)) + goto poll_exit; + + /* Handle Tx Completions, if any */ + for (i = 0; i < bnad->num_tx; i++) { + for (j = 0; j < bnad->num_txq_per_tx; j++) + bnad_tx(bnad, bnad->tx_info[i].tcb[j]); + } + + /* Handle Rx Completions */ + rcvd = bnad_poll_cq(bnad, ccb, budget); + if (rcvd == budget) + return rcvd; +poll_exit: + napi_complete((napi)); + + BNAD_UPDATE_CTR(bnad, netif_rx_complete); + + bnad_enable_txrx_irqs(bnad); + return rcvd; +} + static void bnad_napi_enable(struct bnad *bnad, u32 rx_id) { + int (*napi_poll) (struct napi_struct *, int); struct bnad_rx_ctrl *rx_ctrl; int i; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (bnad->cfg_flags & BNAD_CF_MSIX) + napi_poll = bnad_napi_poll_rx; + else + napi_poll = bnad_napi_poll_txrx; + spin_unlock_irqrestore(&bnad->bna_lock, flags); /* Initialize & enable NAPI */ for (i = 0; i < bnad->num_rxp_per_rx; i++) { rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; - netif_napi_add(bnad->netdev, &rx_ctrl->napi, - bnad_napi_poll_rx, 64); - + napi_poll, 64); napi_enable(&rx_ctrl->napi); } } @@ -1830,7 +1825,6 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id) /* Initialize the Rx event handlers */ rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup; - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy; rx_cbfn.rcb_destroy_cbfn = NULL; rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup; rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy; @@ -1974,27 +1968,6 @@ bnad_enable_default_bcast(struct bnad *bnad) return 0; } -/* Called with bnad_conf_lock() held */ -static void -bnad_restore_vlans(struct bnad *bnad, u32 rx_id) -{ - u16 vlan_id; - unsigned long flags; - - if (!bnad->vlan_grp) - return; - - BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1))); - - for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) { - if (!vlan_group_get_device(bnad->vlan_grp, vlan_id)) - continue; - spin_lock_irqsave(&bnad->bna_lock, flags); - bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id); - spin_unlock_irqrestore(&bnad->bna_lock, flags); - } -} - /* Statistics utilities */ void bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) @@ -2179,6 +2152,16 @@ bnad_q_num_adjust(struct bnad *bnad, int msix_vectors) bnad->num_rxp_per_rx = 1; } +static void +bnad_set_netdev_perm_addr(struct 
bnad *bnad) +{ + struct net_device *netdev = bnad->netdev; + + memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len); + if (is_zero_ether_addr(netdev->dev_addr)) + memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len); +} + /* Enable / disable device */ static void bnad_device_disable(struct bnad *bnad) @@ -2370,9 +2353,6 @@ bnad_open(struct net_device *netdev) /* Enable broadcast */ bnad_enable_default_bcast(bnad); - /* Restore VLANs, if any */ - bnad_restore_vlans(bnad, 0); - /* Set the UCAST address */ spin_lock_irqsave(&bnad->bna_lock, flags); bnad_mac_addr_set_locked(bnad, netdev->dev_addr); @@ -2453,21 +2433,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } - tx_id = 0; - - tx_info = &bnad->tx_info[tx_id]; - tcb = tx_info->tcb[tx_id]; - unmap_q = tcb->unmap_q; - /* * Takes care of the Tx that is scheduled between clearing the flag * and the netif_stop_queue() call. */ - if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { + if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) { dev_kfree_skb(skb); return NETDEV_TX_OK; } + tx_id = 0; + + tx_info = &bnad->tx_info[tx_id]; + tcb = tx_info->tcb[tx_id]; + unmap_q = tcb->unmap_q; + vectors = 1 + skb_shinfo(skb)->nr_frags; if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) { dev_kfree_skb(skb); @@ -2482,8 +2462,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) tcb->consumer_index && !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { acked = bnad_free_txbufs(bnad, tcb); - if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) - bna_ib_ack(tcb->i_dbell, acked); + bna_ib_ack(tcb->i_dbell, acked); smp_mb__before_clear_bit(); clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); } else { @@ -2645,10 +2624,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) tcb->producer_index = txq_prod; smp_mb(); - - if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) - return NETDEV_TX_OK; - bna_txq_prod_indx_doorbell(tcb); if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index) @@ -3057,7 +3032,7 @@ static int __devinit bnad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pcidev_id) { - bool using_dac = false; + bool using_dac; int err; struct bnad *bnad; struct bna *bna; @@ -3091,7 +3066,7 @@ bnad_pci_probe(struct pci_dev *pdev, /* * PCI initialization * Output : using_dac = 1 for 64 bit DMA - * = 0 for 32 bit DMA + * = 0 for 32 bit DMA */ err = bnad_pci_init(bnad, pdev, &using_dac); if (err) @@ -3109,9 +3084,6 @@ bnad_pci_probe(struct pci_dev *pdev, /* Initialize netdev structure, set up ethtool ops */ bnad_netdev_init(bnad, using_dac); - /* Set link to down state */ - netif_carrier_off(netdev); - bnad_enable_msix(bnad); /* Get resource requirement form bna */ @@ -3143,13 +3115,11 @@ bnad_pci_probe(struct pci_dev *pdev, ((unsigned long)bnad)); setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check, ((unsigned long)bnad)); - setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout, - ((unsigned long)bnad)); - setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout, + setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout, ((unsigned long)bnad)); /* Now start the timer before calling IOC */ - mod_timer(&bnad->bna.device.ioc.iocpf_timer, + mod_timer(&bnad->bna.device.ioc.ioc_timer, jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ)); /* @@ -3167,6 +3137,11 @@ bnad_pci_probe(struct pci_dev *pdev, mutex_unlock(&bnad->conf_mutex); + /* + * Make sure the link appears down to the stack + */ + 
netif_carrier_off(netdev); + /* Finally, reguister with net_device layer */ err = register_netdev(netdev); if (err) { diff --git a/trunk/drivers/net/bna/bnad.h b/trunk/drivers/net/bna/bnad.h index 8b1d51557def..ebc3a9078642 100644 --- a/trunk/drivers/net/bna/bnad.h +++ b/trunk/drivers/net/bna/bnad.h @@ -51,7 +51,6 @@ */ struct bnad_rx_ctrl { struct bna_ccb *ccb; - unsigned long flags; struct napi_struct napi; }; @@ -65,7 +64,7 @@ struct bnad_rx_ctrl { #define BNAD_NAME "bna" #define BNAD_NAME_LEN 64 -#define BNAD_VERSION "2.3.2.3" +#define BNAD_VERSION "2.3.2.0" #define BNAD_MAILBOX_MSIX_VECTORS 1 @@ -83,7 +82,6 @@ struct bnad_rx_ctrl { /* Bit positions for tcb->flags */ #define BNAD_TXQ_FREE_SENT 0 -#define BNAD_TXQ_TX_STARTED 1 /* Bit positions for rcb->flags */ #define BNAD_RXQ_REFILL 0 @@ -126,7 +124,6 @@ struct bnad_completion { struct bnad_drv_stats { u64 netif_queue_stop; u64 netif_queue_wakeup; - u64 netif_queue_stopped; u64 tso4; u64 tso6; u64 tso_err; @@ -202,12 +199,12 @@ struct bnad_unmap_q { /* Set, tested & cleared using xxx_bit() functions */ /* Values indicated bit positions */ #define BNAD_RF_CEE_RUNNING 1 -#define BNAD_RF_MBOX_IRQ_DISABLED 2 -#define BNAD_RF_RX_STARTED 3 -#define BNAD_RF_DIM_TIMER_RUNNING 4 -#define BNAD_RF_STATS_TIMER_RUNNING 5 -#define BNAD_RF_TX_SHUTDOWN_DELAYED 6 -#define BNAD_RF_RX_SHUTDOWN_DELAYED 7 +#define BNAD_RF_HW_ERROR 2 +#define BNAD_RF_MBOX_IRQ_DISABLED 3 +#define BNAD_RF_TX_STARTED 4 +#define BNAD_RF_RX_STARTED 5 +#define BNAD_RF_DIM_TIMER_RUNNING 6 +#define BNAD_RF_STATS_TIMER_RUNNING 7 struct bnad { struct net_device *netdev; @@ -309,10 +306,8 @@ extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id); extern void bnad_dim_timer_start(struct bnad *bnad); /* Statistics */ -extern void bnad_netdev_qstats_fill(struct bnad *bnad, - struct rtnl_link_stats64 *stats); -extern void bnad_netdev_hwstats_fill(struct bnad *bnad, - struct rtnl_link_stats64 *stats); +extern void bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats); +extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats); /** * MACROS @@ -325,11 +320,9 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad, #define bnad_enable_rx_irq_unsafe(_ccb) \ { \ - if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) {\ - bna_ib_coalescing_timer_set((_ccb)->i_dbell, \ - (_ccb)->rx_coalescing_timeo); \ - bna_ib_ack((_ccb)->i_dbell, 0); \ - } \ + bna_ib_coalescing_timer_set((_ccb)->i_dbell, \ + (_ccb)->rx_coalescing_timeo); \ + bna_ib_ack((_ccb)->i_dbell, 0); \ } #define bnad_dim_timer_running(_bnad) \ diff --git a/trunk/drivers/net/bna/bnad_ethtool.c b/trunk/drivers/net/bna/bnad_ethtool.c index 99be5ae91991..11fa2ea842c1 100644 --- a/trunk/drivers/net/bna/bnad_ethtool.c +++ b/trunk/drivers/net/bna/bnad_ethtool.c @@ -68,7 +68,6 @@ static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { "netif_queue_stop", "netif_queue_wakeup", - "netif_queue_stopped", "tso4", "tso6", "tso_err", @@ -331,6 +330,10 @@ do { \ BNAD_GET_REG(PCIE_MISC_REG); + BNAD_GET_REG(HOST_SEM0_REG); + BNAD_GET_REG(HOST_SEM1_REG); + BNAD_GET_REG(HOST_SEM2_REG); + BNAD_GET_REG(HOST_SEM3_REG); BNAD_GET_REG(HOST_SEM0_INFO_REG); BNAD_GET_REG(HOST_SEM1_INFO_REG); BNAD_GET_REG(HOST_SEM2_INFO_REG); @@ -1181,9 +1184,6 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, bi = sizeof(*net_stats64) / sizeof(u64); - /* Get netif_queue_stopped from stack */ - bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); - /* Fill 
driver stats into ethtool buffers */
	stats64 = (u64 *)&bnad->stats.drv_stats;
	for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++