qed: Add infrastructure support for tunneling
This patch adds the structures and APIs needed to configure and enable the
different tunnel [VXLAN/GRE/GENEVE] parameters on the adapter.

Signed-off-by: Manish Chopra <manish.chopra@qlogic.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: Ariel Elior <Ariel.Elior@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Manish Chopra authored and David S. Miller committed Apr 15, 2016
1 parent ee1c279 commit 464f664
Showing 11 changed files with 563 additions and 4 deletions.
46 changes: 46 additions & 0 deletions drivers/net/ethernet/qlogic/qed/qed.h
@@ -74,6 +74,51 @@ struct qed_rt_data {
        bool *b_valid;
};

enum qed_tunn_mode {
        QED_MODE_L2GENEVE_TUNN,
        QED_MODE_IPGENEVE_TUNN,
        QED_MODE_L2GRE_TUNN,
        QED_MODE_IPGRE_TUNN,
        QED_MODE_VXLAN_TUNN,
};

enum qed_tunn_clss {
        QED_TUNN_CLSS_MAC_VLAN,
        QED_TUNN_CLSS_MAC_VNI,
        QED_TUNN_CLSS_INNER_MAC_VLAN,
        QED_TUNN_CLSS_INNER_MAC_VNI,
        MAX_QED_TUNN_CLSS,
};

struct qed_tunn_start_params {
        unsigned long tunn_mode;
        u16 vxlan_udp_port;
        u16 geneve_udp_port;
        u8 update_vxlan_udp_port;
        u8 update_geneve_udp_port;
        u8 tunn_clss_vxlan;
        u8 tunn_clss_l2geneve;
        u8 tunn_clss_ipgeneve;
        u8 tunn_clss_l2gre;
        u8 tunn_clss_ipgre;
};

struct qed_tunn_update_params {
        unsigned long tunn_mode_update_mask;
        unsigned long tunn_mode;
        u16 vxlan_udp_port;
        u16 geneve_udp_port;
        u8 update_rx_pf_clss;
        u8 update_tx_pf_clss;
        u8 update_vxlan_udp_port;
        u8 update_geneve_udp_port;
        u8 tunn_clss_vxlan;
        u8 tunn_clss_l2geneve;
        u8 tunn_clss_ipgeneve;
        u8 tunn_clss_l2gre;
        u8 tunn_clss_ipgre;
};

/* The PCI personality is not quite synonymous to protocol ID:
 * 1. All personalities need CORE connections
 * 2. The Ethernet personality may support also the RoCE protocol
@@ -430,6 +475,7 @@ struct qed_dev {
        u8 num_hwfns;
        struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];

        unsigned long tunn_mode;
        u32 drv_type;

        struct qed_eth_stats *reset_stats;
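For illustration only (not part of this patch; the consumer side lands separately), a minimal sketch of filling qed_tunn_start_params, assuming the QED_MODE_* values are bit indices into the unsigned long tunn_mode mask:

static void example_fill_tunn_start(struct qed_tunn_start_params *p_tunn)
{
        memset(p_tunn, 0, sizeof(*p_tunn));
        /* Request VXLAN offload from start of day (assumed bitmask use) */
        set_bit(QED_MODE_VXLAN_TUNN, &p_tunn->tunn_mode);
        p_tunn->tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
        p_tunn->update_vxlan_udp_port = 1;
        p_tunn->vxlan_udp_port = 4789; /* IANA-assigned VXLAN UDP port */
}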
6 changes: 4 additions & 2 deletions drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -558,6 +558,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,

static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          struct qed_tunn_start_params *p_tunn,
                          int hw_mode,
                          bool b_hw_start,
                          enum qed_int_mode int_mode,
@@ -625,7 +626,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

                /* send function start command */
-               rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+               rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode);
                if (rc)
                        DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
        }
@@ -672,6 +673,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
}

int qed_hw_init(struct qed_dev *cdev,
                struct qed_tunn_start_params *p_tunn,
                bool b_hw_start,
                enum qed_int_mode int_mode,
                bool allow_npar_tx_switch,
@@ -724,7 +726,7 @@ int qed_hw_init(struct qed_dev *cdev,
                /* Fall into */
        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
-                                   p_hwfn->hw_info.hw_mode,
+                                   p_tunn, p_hwfn->hw_info.hw_mode,
                                    b_hw_start, int_mode,
                                    allow_npar_tx_switch);
                break;
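For orientation, the tunnel parameters now ride the existing init path; the resulting call flow, using only names from this patch, is:

        qed_hw_init(cdev, p_tunn, ...)
            -> qed_hw_init_pf(p_hwfn, p_ptt, p_tunn, hw_mode, ...)
                -> qed_sp_pf_start(p_hwfn, p_tunn, mf_mode)

so the PF-start ramrod can carry the tunnel configuration down to the firmware.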
2 changes: 2 additions & 0 deletions drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -62,6 +62,7 @@ void qed_resc_setup(struct qed_dev *cdev);
 * @brief qed_hw_init -
 *
 * @param cdev
 * @param p_tunn
 * @param b_hw_start
 * @param int_mode - interrupt mode [msix, inta, etc.] to use.
 * @param allow_npar_tx_switch - npar tx switching to be used
@@ -72,6 +73,7 @@ void qed_resc_setup(struct qed_dev *cdev);
 * @return int
 */
int qed_hw_init(struct qed_dev *cdev,
                struct qed_tunn_start_params *p_tunn,
                bool b_hw_start,
                enum qed_int_mode int_mode,
                bool allow_npar_tx_switch,
51 changes: 50 additions & 1 deletion drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -46,7 +46,7 @@ enum common_ramrod_cmd_id {
        COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
        COMMON_RAMROD_RESERVED,
        COMMON_RAMROD_RESERVED2,
-       COMMON_RAMROD_RESERVED3,
+       COMMON_RAMROD_PF_UPDATE,
        COMMON_RAMROD_EMPTY,
        MAX_COMMON_RAMROD_CMD_ID
};
@@ -626,6 +626,42 @@ struct pf_start_ramrod_data {
        u8 reserved0[4];
};

/* tunnel configuration */
struct pf_update_tunnel_config {
        u8 update_rx_pf_clss;
        u8 update_tx_pf_clss;
        u8 set_vxlan_udp_port_flg;
        u8 set_geneve_udp_port_flg;
        u8 tx_enable_vxlan;
        u8 tx_enable_l2geneve;
        u8 tx_enable_ipgeneve;
        u8 tx_enable_l2gre;
        u8 tx_enable_ipgre;
        u8 tunnel_clss_vxlan;
        u8 tunnel_clss_l2geneve;
        u8 tunnel_clss_ipgeneve;
        u8 tunnel_clss_l2gre;
        u8 tunnel_clss_ipgre;
        __le16 vxlan_udp_port;
        __le16 geneve_udp_port;
        __le16 reserved[3];
};

struct pf_update_ramrod_data {
        u32 reserved[2];
        u32 reserved_1[6];
        struct pf_update_tunnel_config tunnel_config;
};

/* Tunnel classification scheme */
enum tunnel_clss {
        TUNNEL_CLSS_MAC_VLAN = 0,
        TUNNEL_CLSS_MAC_VNI,
        TUNNEL_CLSS_INNER_MAC_VLAN,
        TUNNEL_CLSS_INNER_MAC_VNI,
        MAX_TUNNEL_CLSS
};

enum ports_mode {
        ENGX2_PORTX1 /* 2 engines x 1 port */,
        ENGX2_PORTX2 /* 2 engines x 2 ports */,
@@ -1603,6 +1639,19 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
                          u16 start_pq,
                          u16 num_pqs);

void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
                             struct qed_ptt *p_ptt, u16 dest_port);
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt, bool vxlan_enable);
void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
                        struct qed_ptt *p_ptt, bool eth_gre_enable,
                        bool ip_gre_enable);
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt, u16 dest_port);
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
                           struct qed_ptt *p_ptt, bool eth_geneve_enable,
                           bool ip_geneve_enable);

/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
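The driver-level enum qed_tunn_clss in qed.h mirrors this FW-facing enum tunnel_clss one to one, so translating a configured classification into ramrod fields can be a direct switch. A minimal sketch, assuming such a helper (qed_tunn_get_clss_type is hypothetical here, not part of this hunk):

static u8 qed_tunn_get_clss_type(u8 type)
{
        switch (type) {
        case QED_TUNN_CLSS_MAC_VLAN:
                return TUNNEL_CLSS_MAC_VLAN; /* outer MAC + VLAN */
        case QED_TUNN_CLSS_MAC_VNI:
                return TUNNEL_CLSS_MAC_VNI; /* outer MAC + VNI */
        case QED_TUNN_CLSS_INNER_MAC_VLAN:
                return TUNNEL_CLSS_INNER_MAC_VLAN;
        case QED_TUNN_CLSS_INNER_MAC_VNI:
                return TUNNEL_CLSS_INNER_MAC_VNI;
        default:
                return TUNNEL_CLSS_MAC_VLAN; /* safe fallback */
        }
}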
127 changes: 127 additions & 0 deletions drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -788,3 +788,130 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,

        return true;
}

static void
qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
{
        if (enable)
                set_bit(bit, var);
        else
                clear_bit(bit, var);
}

#define PRS_ETH_TUNN_FIC_FORMAT -188897008

void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
                             struct qed_ptt *p_ptt,
                             u16 dest_port)
{
        qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
        qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
        qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          bool vxlan_enable)
{
        unsigned long reg_val = 0;
        u8 shift;

        reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

        qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

        if (reg_val)
                qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
                       PRS_ETH_TUNN_FIC_FORMAT);

        reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
        shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

        qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

        qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
               vxlan_enable ? 1 : 0);
}

void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                        bool eth_gre_enable, bool ip_gre_enable)
{
        unsigned long reg_val = 0;
        u8 shift;

        reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

        shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
        qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
        if (reg_val)
                qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
                       PRS_ETH_TUNN_FIC_FORMAT);

        reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
        shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

        shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
        qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

        qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
               eth_gre_enable ? 1 : 0);
        qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
               ip_gre_enable ? 1 : 0);
}

void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              u16 dest_port)
{
        qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
        qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
        qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
                           struct qed_ptt *p_ptt,
                           bool eth_geneve_enable,
                           bool ip_geneve_enable)
{
        unsigned long reg_val = 0;
        u8 shift;

        reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);

        shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
        qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);

        qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
        if (reg_val)
                qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
                       PRS_ETH_TUNN_FIC_FORMAT);

        qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
               eth_geneve_enable ? 1 : 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

        /* comp ver */
        reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
        qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
        qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
        qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);

        /* EDPM with geneve tunnel not supported in BB_B0 */
        if (QED_IS_BB_B0(p_hwfn->cdev))
                return;

        qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
               eth_geneve_enable ? 1 : 0);
        qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
               ip_geneve_enable ? 1 : 0);
}
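Taken together, the port and enable helpers form the HW half of the feature: program the destination UDP port into the PRS/NIG/PBF blocks, then flip the per-block parse enables. A minimal usage sketch (example_enable_vxlan is hypothetical and assumes the caller already holds a valid PTT window):

static void example_enable_vxlan(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt)
{
        /* Steer the IANA default VXLAN port, then enable parsing */
        qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
        qed_set_vxlan_enable(p_hwfn, p_ptt, true);
}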
31 changes: 31 additions & 0 deletions drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1884,6 +1884,36 @@ static int qed_stop_txq(struct qed_dev *cdev,
        return 0;
}

static int qed_tunn_configure(struct qed_dev *cdev,
                              struct qed_tunn_params *tunn_params)
{
        struct qed_tunn_update_params tunn_info;
        int i, rc;

        memset(&tunn_info, 0, sizeof(tunn_info));
        if (tunn_params->update_vxlan_port == 1) {
                tunn_info.update_vxlan_udp_port = 1;
                tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
        }

        if (tunn_params->update_geneve_port == 1) {
                tunn_info.update_geneve_udp_port = 1;
                tunn_info.geneve_udp_port = tunn_params->geneve_port;
        }

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *hwfn = &cdev->hwfns[i];

                rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
                                               QED_SPQ_MODE_EBLOCK, NULL);

                if (rc)
                        return rc;
        }

        return 0;
}

static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
                                        enum qed_filter_rx_mode_type type)
{
@@ -2026,6 +2056,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
        .fastpath_stop = &qed_fastpath_stop,
        .eth_cqe_completion = &qed_fp_cqe_completion,
        .get_vport_stats = &qed_get_vport_stats,
        .tunn_config = &qed_tunn_configure,
};

const struct qed_eth_ops *qed_get_eth_ops(void)
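On the protocol-driver side (e.g. qede, not part of this hunk), the new op is reached through the qed_eth_ops table; a minimal sketch, with example_update_geneve_port hypothetical and the qed_tunn_params field names taken from qed_tunn_configure() above:

static int example_update_geneve_port(struct qed_dev *cdev,
                                      const struct qed_eth_ops *ops)
{
        struct qed_tunn_params tunn_params;

        memset(&tunn_params, 0, sizeof(tunn_params));
        tunn_params.update_geneve_port = 1;
        tunn_params.geneve_port = 6081; /* IANA-assigned GENEVE port */

        return ops->tunn_config(cdev, &tunn_params);
}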
2 changes: 1 addition & 1 deletion drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -776,7 +776,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
        /* Start the slowpath */
        data = cdev->firmware->data;

-       rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
+       rc = qed_hw_init(cdev, NULL, true, cdev->int_params.out.int_mode,
                         true, data);
        if (rc)
                goto err2;
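Passing NULL here leaves every tunnel disabled at load time; a caller wanting tunnels live from init would hand qed_hw_init() a populated structure instead, e.g. (hypothetical, reusing tunn_info from the qed.h sketch above):

        rc = qed_hw_init(cdev, &tunn_info, true,
                         cdev->int_params.out.int_mode, true, data);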