Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-11-27 (i40e, iavf)

This series contains updates to i40e and iavf drivers.

Ivan Vecera performs more cleanups on the i40e and iavf drivers, removing
unused fields, register defines, and other unneeded struct members.

Petr Oros uses the iavf_schedule_aq_request() helper to replace open-coded
equivalents (see the sketch below the change summary).

* '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  iavf: use iavf_schedule_aq_request() helper
  iavf: Remove queue tracking fields from iavf_adminq_ring
  i40e: Remove queue tracking fields from i40e_adminq_ring
  i40e: Remove AQ register definitions for VF types
  i40e: Delete unused and useless i40e_pf fields
====================

Link: https://lore.kernel.org/r/20231127211037.1135403-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski committed Nov 29, 2023
2 parents cd04b44 + 9526081 commit f1be1e0
Showing 12 changed files with 90 additions and 200 deletions.
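
To illustrate the helper replacement Petr Oros applies, here is a minimal, hedged sketch of what such a call site typically looks like before and after. The flag name is chosen for illustration, and the open-coded body assumes the usual iavf pattern of setting aq_required bits and rescheduling the watchdog task; it is not the exact upstream code.

/* Sketch only -- open-coded request (flag chosen for illustration): */
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);

/* Sketch only -- the same request routed through the existing helper: */
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_CLOUD_FILTER);
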
16 changes: 0 additions & 16 deletions drivers/net/ethernet/intel/i40e/i40e.h
@@ -468,9 +468,7 @@ struct i40e_pf {
struct i40e_hw hw;
DECLARE_BITMAP(state, __I40E_STATE_SIZE__);
struct msix_entry *msix_entries;
bool fc_autoneg_status;

u16 eeprom_version;
u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */
u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
@@ -486,7 +484,6 @@ struct i40e_pf {
u16 rss_size_max; /* HW defined max RSS queues */
u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
u16 num_alloc_vsi; /* num VSIs this driver supports */
u8 atr_sample_rate;
bool wol_en;

struct hlist_head fdir_filter_list;
@@ -524,12 +521,10 @@ struct i40e_pf {
struct hlist_head cloud_filter_list;
u16 num_cloud_filters;

enum i40e_interrupt_policy int_policy;
u16 rx_itr_default;
u16 tx_itr_default;
u32 msg_enable;
char int_name[I40E_INT_NAME_STR_LEN];
u16 adminq_work_limit; /* num of admin receive queue desc to process */
unsigned long service_timer_period;
unsigned long service_timer_previous;
struct timer_list service_timer;
@@ -543,7 +538,6 @@ struct i40e_pf {
u32 tx_timeout_count;
u32 tx_timeout_recovery_level;
unsigned long tx_timeout_last_recovery;
u32 tx_sluggish_count;
u32 hw_csum_rx_error;
u32 led_status;
u16 corer_count; /* Core reset count */
@@ -565,17 +559,13 @@ struct i40e_pf {
struct i40e_lump_tracking *irq_pile;

/* switch config info */
u16 pf_seid;
u16 main_vsi_seid;
u16 mac_seid;
struct kobject *switch_kobj;
#ifdef CONFIG_DEBUG_FS
struct dentry *i40e_dbg_pf;
#endif /* CONFIG_DEBUG_FS */
bool cur_promisc;

u16 instance; /* A unique number per i40e_pf instance in the system */

/* sr-iov config info */
struct i40e_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
@@ -669,20 +659,15 @@ struct i40e_pf {
unsigned long ptp_tx_start;
struct hwtstamp_config tstamp_config;
struct timespec64 ptp_prev_hw_time;
struct work_struct ptp_pps_work;
struct work_struct ptp_extts0_work;
struct work_struct ptp_extts1_work;
ktime_t ptp_reset_start;
struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
u32 ptp_adj_mult;
u32 tx_hwtstamp_timeouts;
u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
u32 latch_event_flags;
u64 ptp_pps_start;
u32 pps_delay;
spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
struct ptp_pin_desc ptp_pin[3];
unsigned long latch_events[4];
bool ptp_tx;
bool ptp_rx;
@@ -695,7 +680,6 @@ struct i40e_pf {
u32 fd_inv;
u16 phy_led_val;

u16 override_q_count;
u16 last_sw_conf_flags;
u16 last_sw_conf_valid_flags;
/* List to keep previous DDP profiles to be rolled back in the future */
86 changes: 31 additions & 55 deletions drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -8,27 +8,6 @@

static void i40e_resume_aq(struct i40e_hw *hw);

/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
* This assumes the alloc_asq and alloc_arq functions have already been called
**/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
/* set head and tail registers in our local struct */
hw->aq.asq.tail = I40E_PF_ATQT;
hw->aq.asq.head = I40E_PF_ATQH;
hw->aq.asq.len = I40E_PF_ATQLEN;
hw->aq.asq.bal = I40E_PF_ATQBAL;
hw->aq.asq.bah = I40E_PF_ATQBAH;
hw->aq.arq.tail = I40E_PF_ARQT;
hw->aq.arq.head = I40E_PF_ARQH;
hw->aq.arq.len = I40E_PF_ARQLEN;
hw->aq.arq.bal = I40E_PF_ARQBAL;
hw->aq.arq.bah = I40E_PF_ARQBAH;
}

/**
* i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
@@ -254,17 +233,17 @@ static int i40e_config_asq_regs(struct i40e_hw *hw)
u32 reg = 0;

/* Clear Head and Tail */
wr32(hw, hw->aq.asq.head, 0);
wr32(hw, hw->aq.asq.tail, 0);
wr32(hw, I40E_PF_ATQH, 0);
wr32(hw, I40E_PF_ATQT, 0);

/* set starting point */
wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.desc_buf.pa));

/* Check one register to verify that config was applied */
reg = rd32(hw, hw->aq.asq.bal);
reg = rd32(hw, I40E_PF_ATQBAL);
if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
ret_code = -EIO;

@@ -283,20 +262,20 @@ static int i40e_config_arq_regs(struct i40e_hw *hw)
u32 reg = 0;

/* Clear Head and Tail */
wr32(hw, hw->aq.arq.head, 0);
wr32(hw, hw->aq.arq.tail, 0);
wr32(hw, I40E_PF_ARQH, 0);
wr32(hw, I40E_PF_ARQT, 0);

/* set starting point */
wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.desc_buf.pa));

/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
wr32(hw, I40E_PF_ARQT, hw->aq.num_arq_entries - 1);

/* Check one register to verify that config was applied */
reg = rd32(hw, hw->aq.arq.bal);
reg = rd32(hw, I40E_PF_ARQBAL);
if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
ret_code = -EIO;

@@ -439,11 +418,11 @@ static int i40e_shutdown_asq(struct i40e_hw *hw)
}

/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.asq.head, 0);
wr32(hw, hw->aq.asq.tail, 0);
wr32(hw, hw->aq.asq.len, 0);
wr32(hw, hw->aq.asq.bal, 0);
wr32(hw, hw->aq.asq.bah, 0);
wr32(hw, I40E_PF_ATQH, 0);
wr32(hw, I40E_PF_ATQT, 0);
wr32(hw, I40E_PF_ATQLEN, 0);
wr32(hw, I40E_PF_ATQBAL, 0);
wr32(hw, I40E_PF_ATQBAH, 0);

hw->aq.asq.count = 0; /* to indicate uninitialized queue */

@@ -473,11 +452,11 @@ static int i40e_shutdown_arq(struct i40e_hw *hw)
}

/* Stop firmware AdminQ processing */
wr32(hw, hw->aq.arq.head, 0);
wr32(hw, hw->aq.arq.tail, 0);
wr32(hw, hw->aq.arq.len, 0);
wr32(hw, hw->aq.arq.bal, 0);
wr32(hw, hw->aq.arq.bah, 0);
wr32(hw, I40E_PF_ARQH, 0);
wr32(hw, I40E_PF_ARQT, 0);
wr32(hw, I40E_PF_ARQLEN, 0);
wr32(hw, I40E_PF_ARQBAL, 0);
wr32(hw, I40E_PF_ARQBAH, 0);

hw->aq.arq.count = 0; /* to indicate uninitialized queue */

@@ -608,9 +587,6 @@ int i40e_init_adminq(struct i40e_hw *hw)
goto init_adminq_exit;
}

/* Set up register offsets */
i40e_adminq_init_regs(hw);

/* setup ASQ command write back timeout */
hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

@@ -720,9 +696,9 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)

desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
while (rd32(hw, I40E_PF_ATQH) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
"ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
"ntc %d head %d.\n", ntc, rd32(hw, I40E_PF_ATQH));

if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
Expand Down Expand Up @@ -756,7 +732,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
return rd32(hw, I40E_PF_ATQH) == hw->aq.asq.next_to_use;

}

@@ -797,7 +773,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,

hw->aq.asq_last_status = I40E_AQ_RC_OK;

val = rd32(hw, hw->aq.asq.head);
val = rd32(hw, I40E_PF_ATQH);
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
@@ -889,7 +865,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
if (!details->postpone)
wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
wr32(hw, I40E_PF_ATQT, hw->aq.asq.next_to_use);

/* if cmd_details are not defined or async flag is not set,
* we need to wait for desc write back
@@ -949,7 +925,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
if (rd32(hw, I40E_PF_ATQLEN) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
status = -EIO;
@@ -1103,7 +1079,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
}

/* set next_to_use to head */
ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
ntu = rd32(hw, I40E_PF_ARQH) & I40E_PF_ARQH_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = -EALREADY;
@@ -1151,7 +1127,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

/* set tail = the last cleaned desc index. */
wr32(hw, hw->aq.arq.tail, ntc);
wr32(hw, I40E_PF_ARQT, ntc);
/* ntc is updated to tail + 1 */
ntc++;
if (ntc == hw->aq.num_arq_entries)
7 changes: 0 additions & 7 deletions drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -29,13 +29,6 @@ struct i40e_adminq_ring {
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;

/* used for queue tracking */
u32 head;
u32 tail;
u32 len;
u32 bah;
u32 bal;
};

/* ASQ transaction details */
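
For context on the struct cleanup above: judging from these patches, once the VF AdminQ register definitions are removed only the fixed PF offsets remain, so the cached head/tail/len/bah/bal fields add nothing over using the register names directly. The before/after shape, taken from the i40e_adminq.c hunks above:

wr32(hw, hw->aq.asq.head, 0);     /* before: offset cached in i40e_adminq_ring */
reg = rd32(hw, hw->aq.asq.bal);

wr32(hw, I40E_PF_ATQH, 0);        /* after: fixed PF AdminQ register used directly */
reg = rd32(hw, I40E_PF_ATQBAL);
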
8 changes: 4 additions & 4 deletions drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -195,11 +195,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
**/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
if (hw->aq.asq.len)
return !!(rd32(hw, hw->aq.asq.len) &
I40E_PF_ATQLEN_ATQENABLE_MASK);
else
/* Check if the queue is initialized */
if (!hw->aq.asq.count)
return false;

return !!(rd32(hw, I40E_PF_ATQLEN) & I40E_PF_ATQLEN_ATQENABLE_MASK);
}

/**
3 changes: 0 additions & 3 deletions drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1028,9 +1028,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"emp reset count: %d\n", pf->empr_count);
dev_info(&pf->pdev->dev,
"pf reset count: %d\n", pf->pfr_count);
dev_info(&pf->pdev->dev,
"pf tx sluggish count: %d\n",
pf->tx_sluggish_count);
} else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
struct i40e_aqc_query_port_ets_config_resp *bw_data;
struct i40e_dcbx_config *cfg =