Merge branch 'qed-next'
Yuval Mintz says:

====================
qed/qede: Mostly-cleanup series

This series contains some cleanup of the qed and qede code:
 - #1 contains mostly static/endian changes in order to allow qede to
   pass sparse compilation cleanly.
 - #2, #5 and #6 are either semantic cleanups or remove dead code from
   the driver.
 - #9, #10 and #11 relate to printing and slightly change some APIs
   between qed and the protocol drivers to that end [sharing the
   interface names and device information].

The rest of the patches are minor changes/fixes to various flows
in qed.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed May 24, 2017
2 parents 417ccf6 + 712c3cb commit 2a7f38c
Showing 19 changed files with 223 additions and 82 deletions.
9 changes: 1 addition & 8 deletions drivers/net/ethernet/qlogic/qed/qed.h
@@ -598,16 +598,11 @@ struct qed_dev {
enum qed_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \
CHIP_REV_IS_A0(dev))
#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \
CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev) QED_IS_AH(dev)

#define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)

u16 vendor_id;
u16 device_id;
#define QED_DEV_ID_MASK 0xff00
@@ -621,7 +616,6 @@ struct qed_dev {
u16 chip_rev;
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 12
#define CHIP_REV_IS_A0(_cdev) (!(_cdev)->chip_rev)
#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1)

u16 chip_metal;
@@ -633,7 +627,7 @@ struct qed_dev {
#define CHIP_BOND_ID_SHIFT 0

u8 num_engines;
u8 num_ports_in_engines;
u8 num_ports_in_engine;
u8 num_funcs_in_port;

u8 path_id;
@@ -644,7 +638,6 @@ struct qed_dev {

int pcie_width;
int pcie_speed;
u8 ver_str[VER_SIZE];

/* Add MF related configuration */
u8 mcp_rev;
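With BB A0 support removed, chip differentiation in qed comes down to the remaining helpers (QED_IS_BB(), QED_IS_AH()/QED_IS_K2() and CHIP_REV_IS_B0()). A minimal sketch of the kind of caller that stays valid after this hunk; the helper name is invented for illustration, and the register choice mirrors qed_hw_info_port_num_bb()/_ah() further down:

	/* Hypothetical helper, illustration only: pick the per-chip port
	 * configuration register using the macros that survive this patch.
	 */
	static u32 qed_port_conf_reg(struct qed_dev *cdev)
	{
		if (QED_IS_BB(cdev))
			return CNIG_REG_NW_PORT_MODE_BB_B0;

		return CNIG_REG_NIG_PORT0_CONF_K2;	/* QED_IS_AH() == QED_IS_K2() */
	}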
57 changes: 32 additions & 25 deletions drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -300,7 +300,7 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
qm_info->vport_wfq_en = 1;

/* TC config is different for AH 4 port */
four_port = p_hwfn->cdev->num_ports_in_engines == MAX_NUM_PORTS_K2;
four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2;

/* in AH 4 port we have fewer TCs per port */
qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
@@ -329,7 +329,7 @@ static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
{
/* Initialize qm port parameters */
u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engines;
u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;

/* indicate how ooo and high pri traffic is dealt with */
active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
@@ -693,7 +693,7 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));

/* port table */
for (i = 0; i < p_hwfn->cdev->num_ports_in_engines; i++) {
for (i = 0; i < p_hwfn->cdev->num_ports_in_engine; i++) {
port = &(qm_info->qm_port_params[i]);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
@@ -823,7 +823,7 @@ static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
goto alloc_err;

qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
p_hwfn->cdev->num_ports_in_engines,
p_hwfn->cdev->num_ports_in_engine,
GFP_KERNEL);
if (!qm_info->qm_port_params)
goto alloc_err;
@@ -1108,7 +1108,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
return -EINVAL;
}

switch (p_hwfn->cdev->num_ports_in_engines) {
switch (p_hwfn->cdev->num_ports_in_engine) {
case 1:
hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
break;
@@ -1120,7 +1120,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
break;
default:
DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
p_hwfn->cdev->num_ports_in_engines);
p_hwfn->cdev->num_ports_in_engine);
return -EINVAL;
}

@@ -1253,7 +1253,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
}

memset(&params, 0, sizeof(params));
params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.pf_rl_en = qm_info->pf_rl_en;
params.pf_wfq_en = qm_info->pf_wfq_en;
@@ -1513,7 +1513,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

/* send function start command */
rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn,
p_hwfn->cdev->mf_mode,
allow_npar_tx_switch);
if (rc) {
DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
@@ -1697,6 +1698,11 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
return mfw_rc;
}

/* Check if there is a DID mismatch between nvm-cfg/efuse */
if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
DP_NOTICE(p_hwfn,
"warning: device configuration is not supported on this board type. The device may not function as expected.\n");

/* send DCBX attention request command */
DP_VERBOSE(p_hwfn,
QED_MSG_DCB,
@@ -1942,6 +1948,13 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
if (!p_ptt)
return -EAGAIN;

/* If roce info is allocated it means roce is initialized and should
* be enabled in searcher.
*/
if (p_hwfn->p_rdma_info &&
p_hwfn->b_rdma_enabled_in_prs)
qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);

/* Re-open incoming traffic */
qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
qed_ptt_release(p_hwfn, p_ptt);
@@ -2239,7 +2252,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
case QED_BDQ:
if (!*p_resc_num)
*p_resc_start = 0;
else if (p_hwfn->cdev->num_ports_in_engines == 4)
else if (p_hwfn->cdev->num_ports_in_engine == 4)
*p_resc_start = p_hwfn->port_id;
else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
*p_resc_start = p_hwfn->port_id;
@@ -2656,15 +2669,15 @@ static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);

if (port_mode < 3) {
p_hwfn->cdev->num_ports_in_engines = 1;
p_hwfn->cdev->num_ports_in_engine = 1;
} else if (port_mode <= 5) {
p_hwfn->cdev->num_ports_in_engines = 2;
p_hwfn->cdev->num_ports_in_engine = 2;
} else {
DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
p_hwfn->cdev->num_ports_in_engines);
p_hwfn->cdev->num_ports_in_engine);

/* Default num_ports_in_engines to something */
p_hwfn->cdev->num_ports_in_engines = 1;
/* Default num_ports_in_engine to something */
p_hwfn->cdev->num_ports_in_engine = 1;
}
}

@@ -2674,20 +2687,20 @@ static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
u32 port;
int i;

p_hwfn->cdev->num_ports_in_engines = 0;
p_hwfn->cdev->num_ports_in_engine = 0;

for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
port = qed_rd(p_hwfn, p_ptt,
CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
if (port & 1)
p_hwfn->cdev->num_ports_in_engines++;
p_hwfn->cdev->num_ports_in_engine++;
}

if (!p_hwfn->cdev->num_ports_in_engines) {
if (!p_hwfn->cdev->num_ports_in_engine) {
DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");

/* Default num_ports_in_engine to something */
p_hwfn->cdev->num_ports_in_engines = 1;
p_hwfn->cdev->num_ports_in_engine = 1;
}
}

@@ -2806,12 +2819,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
cdev->chip_num, cdev->chip_rev,
cdev->chip_bond_id, cdev->chip_metal);

if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
DP_NOTICE(cdev->hwfns,
"The chip type/rev (BB A0) is not supported!\n");
return -EINVAL;
}

return 0;
}

@@ -4061,7 +4068,7 @@ static int qed_device_num_ports(struct qed_dev *cdev)
if (cdev->num_hwfns > 1)
return 1;

return cdev->num_ports_in_engines * qed_device_num_engines(cdev);
return cdev->num_ports_in_engine * qed_device_num_engines(cdev);
}

int qed_device_get_port_id(struct qed_dev *cdev)
8 changes: 8 additions & 0 deletions drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -11655,6 +11655,8 @@ struct public_drv_mb {
#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3

#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)

u32 drv_pulse_mb;
#define DRV_PULSE_SEQ_MASK 0x00007fff
#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
@@ -11780,6 +11782,12 @@ struct nvm_cfg1_glob {
u32 led_global_settings;
u32 generic_cont1;
u32 mbi_version;
#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
u32 mbi_date;
u32 misc_sig;
u32 device_capabilities;
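The three mask/offset pairs added above pack the MBI version one byte per component. A minimal sketch, not part of this patch, of how a consumer might turn the value filled in by qed_mcp_get_mbi_ver() (added in qed_mcp.c below) into a printable string; p_hwfn, p_ptt and buf are assumed to exist in the caller:

	u32 mbi_ver;
	char buf[16];

	/* Unpack the three NVM_CFG1_GLOB_MBI_VERSION_* byte fields. */
	if (!qed_mcp_get_mbi_ver(p_hwfn, p_ptt, &mbi_ver))
		snprintf(buf, sizeof(buf), "%d.%d.%d",
			 (mbi_ver & NVM_CFG1_GLOB_MBI_VERSION_2_MASK) >>
			 NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET,
			 (mbi_ver & NVM_CFG1_GLOB_MBI_VERSION_1_MASK) >>
			 NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET,
			 (mbi_ver & NVM_CFG1_GLOB_MBI_VERSION_0_MASK) >>
			 NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET);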
17 changes: 15 additions & 2 deletions drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -2300,14 +2300,25 @@ static int qed_tunn_configure(struct qed_dev *cdev,

for_each_hwfn(cdev, i) {
struct qed_hwfn *hwfn = &cdev->hwfns[i];
struct qed_ptt *p_ptt;
struct qed_tunnel_info *tun;

tun = &hwfn->cdev->tunnel;
if (IS_PF(cdev)) {
p_ptt = qed_ptt_acquire(hwfn);
if (!p_ptt)
return -EAGAIN;
} else {
p_ptt = NULL;
}

rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
QED_SPQ_MODE_EBLOCK, NULL);
if (rc)
if (rc) {
if (IS_PF(cdev))
qed_ptt_release(hwfn, p_ptt);
return rc;
}

if (IS_PF_SRIOV(hwfn)) {
u16 vxlan_port, geneve_port;
@@ -2324,6 +2335,8 @@

qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}
if (IS_PF(cdev))
qed_ptt_release(hwfn, p_ptt);
}

return 0;
26 changes: 20 additions & 6 deletions drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -281,6 +281,9 @@ int qed_fill_dev_info(struct qed_dev *cdev,
qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
&dev_info->mfw_rev, NULL);

qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
&dev_info->mbi_version);

qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
&dev_info->flash_size);

@@ -335,6 +338,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
if (!cdev)
goto err0;

cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
cdev->protocol = params->protocol;

if (params->is_vf)
@@ -606,6 +610,18 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
/* Calling the disable function will make sure that any
* currently-running function is completed. The following call to the
* enable function makes this sequence a flush-like operation.
*/
if (p_hwfn->b_sp_dpc_enabled) {
tasklet_disable(p_hwfn->sp_dpc);
tasklet_enable(p_hwfn->sp_dpc);
}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
struct qed_dev *cdev = p_hwfn->cdev;
@@ -617,6 +633,8 @@ void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
synchronize_irq(cdev->int_params.msix_table[id].vector);
else
synchronize_irq(cdev->pdev->irq);

qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
@@ -1111,17 +1129,13 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
return 0;
}

static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
char ver_str[VER_SIZE])
static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
int i;

memcpy(cdev->name, name, NAME_SIZE);
for_each_hwfn(cdev, i)
snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);

memcpy(cdev->ver_str, ver_str, VER_SIZE);
cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}

static u32 qed_sb_init(struct qed_dev *cdev,
@@ -1675,7 +1689,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.probe = &qed_probe,
.remove = &qed_remove,
.set_power_state = &qed_set_power_state,
.set_id = &qed_set_id,
.set_name = &qed_set_name,
.update_pf_params = &qed_update_pf_params,
.slowpath_start = &qed_slowpath_start,
.slowpath_stop = &qed_slowpath_stop,
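With .set_id renamed to .set_name above, the protocol-driver side only hands over the interface name; the version string is no longer part of the call. A sketch of the expected usage, where edev->ops, edev->cdev and edev->ndev follow qede's naming conventions and are assumptions rather than lines from this commit:

	/* Illustration only: report the netdev name to qed via the renamed op. */
	edev->ops->common->set_name(edev->cdev, edev->ndev->name);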
30 changes: 30 additions & 0 deletions drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1523,6 +1523,36 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
return 0;
}

int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_mbi_ver)
{
u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;

if (IS_VF(p_hwfn->cdev))
return -EINVAL;

/* Read the address of the nvm_cfg */
nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
if (!nvm_cfg_addr) {
DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
return -EINVAL;
}

/* Read the offset of nvm_cfg1 */
nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
offsetof(struct nvm_cfg1, glob) +
offsetof(struct nvm_cfg1_glob, mbi_version);
*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
mbi_ver_addr) &
(NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
NVM_CFG1_GLOB_MBI_VERSION_2_MASK);

return 0;
}

int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
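The corresponding header change is not among the files rendered above; the declaration that would presumably accompany the new function in qed_mcp.h should read roughly:

	/* Assumed prototype for qed_mcp.h (header not shown in this view). */
	int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt, u32 *p_mbi_ver);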