Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates

This series contains updates to i40e only.

Anjali provides two cleanups to remove unnecessary code and a fix to
resolve debugfs dumping only half of the NVM.  She then provides a fix
to ethtool NVM reads, which were reading the shadow RAM instead of the
actual NVM.

Jesse provides a couple of fixes: the first removes custom i40e
functions which duplicate existing kernel functionality, and the second
fixes constant cast issues by replacing __constant_htons with htons.
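
A minimal illustration of the kind of replacement described above,
assuming a typical protocol check in the transmit path; the function
name is hypothetical and this is not a hunk from the series.  htons()
already folds compile-time constants, so the __constant_ variant adds
nothing:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Hypothetical example, not taken from the patches themselves. */
static bool i40e_example_pkt_is_ipv4(const struct sk_buff *skb)
{
	/* before: return skb->protocol == __constant_htons(ETH_P_IP); */
	return skb->protocol == htons(ETH_P_IP);
}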

Mitch provides a couple of fixes for the VF interfaces in i40e.  The
first guards against VF message races which can cause a panic.  The
second reinitializes the buffer size each time we clean the ARQ, since
otherwise subsequent messages can be truncated.  Lastly, he adds
functionality to enable/disable ICR 0 dynamically.
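
A minimal sketch of the ARQ buffer-size fix described above, assuming
the i40e_arq_event_info fields, the i40e_clean_arq_element() signature
and the I40E_MAX_AQ_BUF_SIZE constant of that era; treat the names as
assumptions rather than the actual hunk:

#include "i40e.h"

/* Hypothetical helper illustrating the pattern, not the real code. */
static void i40e_example_drain_arq(struct i40e_hw *hw)
{
	struct i40e_arq_event_info event = {0};
	i40e_status ret;
	u16 pending;

	event.msg_buf = kzalloc(I40E_MAX_AQ_BUF_SIZE, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		/* the clean routine writes the length of the message it just
		 * received back into msg_size, so restore the full buffer
		 * size on every pass or later, larger messages get truncated
		 */
		event.msg_size = I40E_MAX_AQ_BUF_SIZE;
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		/* ... handle event.desc and event.msg_buf here ... */
	} while (pending);

	kfree(event.msg_buf);
}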

Vasu adds a simple guard against multiple includes of the i40e_txrx.h
file.
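
The guard follows the usual kernel convention; a sketch of what it
looks like (the exact guard symbol in i40e_txrx.h is assumed here):

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

/* ... ring, descriptor and queue definitions ... */

#endif /* _I40E_TXRX_H_ */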

Shannon provides a couple of fixes: the first swaps a couple of lines
in the error handling when the allocation of the VSI array fails.  The
second fixes an issue where we try to free a q_vector that has not been
set up, which can panic the kernel.
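
A hedged sketch of the q_vector guard described above; the helper name
is hypothetical and the body is condensed from memory of the driver
rather than taken from the patch.  The point is simply to bail out
before touching a vector that was never allocated:

/* Hypothetical example of the guard, not the actual patch. */
static void i40e_example_free_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];

	if (!q_vector)	/* setup never got this far, nothing to free */
		return;

	netif_napi_del(&q_vector->napi);
	vsi->q_vectors[v_idx] = NULL;
	kfree_rcu(q_vector, rcu);
}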

David provides a patch to save off the pointer to memory and the
length of the two structs used in the admin queue, in order to store
all of the information about the allocated kernel memory.
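
For context on what now gets stored: the i40e_dma_mem bookkeeping
struct (as defined in i40e_osdep.h, reproduced here from memory) keeps
the virtual address, DMA address and length together, which is exactly
the information the admin queue ring embeds in the i40e_adminq.h hunk
further down:

struct i40e_dma_mem {
	void *va;	/* kernel virtual address of the buffer */
	dma_addr_t pa;	/* DMA (bus) address handed to the hardware */
	u32 size;	/* length of the allocation in bytes */
};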

Neerav fixes ring allocation so that allocation and clearing of rings
for a VSI use alloc_queue_pairs rather than num_queue_pairs.  He then
removes the unused define for multi-queue enabled.
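
A minimal sketch of the loop-bound change described above; the helper
is hypothetical and the ring-pair detail is recalled from the driver,
not verified against this series.  Only the alloc_queue_pairs versus
num_queue_pairs distinction is the point:

/* Hypothetical example, not the actual patch; the tx and rx ring of a
 * pair are assumed to share one allocation, so freeing the tx side
 * releases both.
 */
static void i40e_example_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	/* walk everything that was allocated, not just what is in use */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {	/* was: num_queue_pairs */
		if (vsi->tx_rings[i]) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			vsi->tx_rings[i] = NULL;
			vsi->rx_rings[i] = NULL;
		}
	}
}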
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Jan 6, 2014
2 parents 1e85c9b + c3f0c4f commit 3a2e15d
Showing 11 changed files with 162 additions and 185 deletions.
40 changes: 15 additions & 25 deletions drivers/net/ethernet/intel/i40e/i40e.h
@@ -65,7 +65,7 @@
#define I40E_MAX_NPAR_QPS 32

#define I40E_MAX_NUM_DESCRIPTORS 4096
#define I40E_MAX_REGISTER 0x0038FFFF
#define I40E_MAX_REGISTER 0x800000
#define I40E_DEFAULT_NUM_DESCRIPTORS 512
#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
#define I40E_MIN_NUM_DESCRIPTORS 64
@@ -230,28 +230,24 @@ struct i40e_pf {
#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4)
#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5)
#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6)
#define I40E_FLAG_MQ_ENABLED (u64)(1 << 7)
#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 8)
#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 9)
#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 10)
#define I40E_FLAG_IN_NETPOLL (u64)(1 << 13)
#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 14)
#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 15)
#define I40E_FLAG_FILTER_SYNC (u64)(1 << 16)
#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 18)
#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 19)
#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 20)
#define I40E_FLAG_DCB_ENABLED (u64)(1 << 21)
#define I40E_FLAG_FDIR_ENABLED (u64)(1 << 22)
#define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 23)
#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7)
#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8)
#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9)
#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12)
#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13)
#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14)
#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15)
#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17)
#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18)
#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19)
#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20)
#define I40E_FLAG_FDIR_ENABLED (u64)(1 << 21)
#define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 22)
#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26)
#ifdef CONFIG_I40E_VXLAN
#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
#endif

u16 num_tx_queues;
u16 num_rx_queues;

bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
struct i40e_hw_port_stats stats_offsets;
@@ -521,13 +517,6 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
bool printconfig);

/* needed by i40e_main.c */
void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data,
struct i40e_ring *tx_ring);
void i40e_add_remove_filter(struct i40e_fdir_data fdir_data,
struct i40e_ring *tx_ring);
void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data,
struct i40e_ring *tx_ring);
int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
struct i40e_pf *pf, bool add);

@@ -565,6 +554,7 @@ static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
103 changes: 44 additions & 59 deletions drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -66,31 +66,23 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
struct i40e_virt_mem mem;

ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
i40e_mem_atq_ring,
(hw->aq.num_asq_entries *
sizeof(struct i40e_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;

hw->aq.asq.desc = hw->aq.asq_mem.va;
hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;

ret_code = i40e_allocate_virt_mem(hw, &mem,
ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
(hw->aq.num_asq_entries *
sizeof(struct i40e_asq_cmd_details)));
if (ret_code) {
i40e_free_dma_mem(hw, &hw->aq.asq_mem);
hw->aq.asq_mem.va = NULL;
hw->aq.asq_mem.pa = 0;
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
return ret_code;
}

hw->aq.asq.details = mem.va;

return ret_code;
}

@@ -102,16 +94,11 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;

ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
i40e_mem_arq_ring,
(hw->aq.num_arq_entries *
sizeof(struct i40e_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;

hw->aq.arq.desc = hw->aq.arq_mem.va;
hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;

return ret_code;
}
@@ -125,14 +112,7 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
**/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
struct i40e_virt_mem mem;

i40e_free_dma_mem(hw, &hw->aq.asq_mem);
hw->aq.asq_mem.va = NULL;
hw->aq.asq_mem.pa = 0;
mem.va = hw->aq.asq.details;
i40e_free_virt_mem(hw, &mem);
hw->aq.asq.details = NULL;
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
@@ -144,9 +124,7 @@ static void i40e_free_adminq_asq(struct i40e_hw *hw)
**/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
i40e_free_dma_mem(hw, &hw->aq.arq_mem);
hw->aq.arq_mem.va = NULL;
hw->aq.arq_mem.pa = 0;
i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
@@ -157,7 +135,6 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
struct i40e_aq_desc *desc;
struct i40e_virt_mem mem;
struct i40e_dma_mem *bi;
int i;

@@ -166,11 +143,11 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
*/

/* buffer_info structures do not need alignment */
ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
sizeof(struct i40e_dma_mem)));
ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_arq_bufs;
hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_arq_entries; i++) {
@@ -212,8 +189,7 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
mem.va = hw->aq.arq.r.arq_bi;
i40e_free_virt_mem(hw, &mem);
i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

return ret_code;
}
@@ -225,16 +201,15 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
struct i40e_virt_mem mem;
struct i40e_dma_mem *bi;
int i;

/* No mapped memory needed yet, just the buffer info structures */
ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
sizeof(struct i40e_dma_mem)));
ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_asq_bufs;
hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_asq_entries; i++) {
@@ -254,8 +229,7 @@ static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
mem.va = hw->aq.asq.r.asq_bi;
i40e_free_virt_mem(hw, &mem);
i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

return ret_code;
}
@@ -266,14 +240,17 @@ static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
**/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
struct i40e_virt_mem mem;
int i;

/* free descriptors */
for (i = 0; i < hw->aq.num_arq_entries; i++)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

mem.va = hw->aq.arq.r.arq_bi;
i40e_free_virt_mem(hw, &mem);
/* free the descriptor memory */
i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

/* free the dma header */
i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
@@ -282,17 +259,21 @@ static void i40e_free_arq_bufs(struct i40e_hw *hw)
**/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
struct i40e_virt_mem mem;
int i;

/* only unmap if the address is non-NULL */
for (i = 0; i < hw->aq.num_asq_entries; i++)
if (hw->aq.asq.r.asq_bi[i].pa)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

/* now free the buffer info list */
mem.va = hw->aq.asq.r.asq_bi;
i40e_free_virt_mem(hw, &mem);
/* free the buffer info list */
i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

/* free the descriptor memory */
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

/* free the dma header */
i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
@@ -305,14 +286,18 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
{
if (hw->mac.type == I40E_MAC_VF) {
/* configure the transmit queue */
wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
wr32(hw, I40E_VF_ATQBAH1,
upper_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_VF_ATQBAL1,
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
I40E_VF_ATQLEN1_ATQENABLE_MASK));
} else {
/* configure the transmit queue */
wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
wr32(hw, I40E_PF_ATQBAH,
upper_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQBAL,
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
}
@@ -328,14 +313,18 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
{
if (hw->mac.type == I40E_MAC_VF) {
/* configure the receive queue */
wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
wr32(hw, I40E_VF_ARQBAH1,
upper_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_VF_ARQBAL1,
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
I40E_VF_ARQLEN1_ARQENABLE_MASK));
} else {
/* configure the receive queue */
wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
wr32(hw, I40E_PF_ARQBAH,
upper_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQBAL,
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
}
@@ -483,8 +472,6 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)

/* free ring buffers */
i40e_free_asq_bufs(hw);
/* free the ring descriptors */
i40e_free_adminq_asq(hw);

mutex_unlock(&hw->aq.asq_mutex);

@@ -516,8 +503,6 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)

/* free ring buffers */
i40e_free_arq_bufs(hw);
/* free the ring descriptors */
i40e_free_adminq_arq(hw);

mutex_unlock(&hw->aq.arq_mutex);

13 changes: 5 additions & 8 deletions drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -32,20 +32,20 @@
#include "i40e_adminq_cmd.h"

#define I40E_ADMINQ_DESC(R, i) \
(&(((struct i40e_aq_desc *)((R).desc))[i]))
(&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))

#define I40E_ADMINQ_DESC_ALIGNMENT 4096

struct i40e_adminq_ring {
void *desc; /* Descriptor ring memory */
void *details; /* ASQ details */
struct i40e_virt_mem dma_head; /* space for dma structures */
struct i40e_dma_mem desc_buf; /* descriptor ring memory */
struct i40e_virt_mem cmd_buf; /* command buffer memory */

union {
struct i40e_dma_mem *asq_bi;
struct i40e_dma_mem *arq_bi;
} r;

u64 dma_addr; /* Physical address of the ring */
u16 count; /* Number of descriptors */
u16 rx_buf_len; /* Admin Receive Queue buffer length */

@@ -70,7 +70,7 @@ struct i40e_asq_cmd_details {
};

#define I40E_ADMINQ_DETAILS(R, i) \
(&(((struct i40e_asq_cmd_details *)((R).details))[i]))
(&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))

/* ARQ event information */
struct i40e_arq_event_info {
@@ -95,9 +95,6 @@ struct i40e_adminq_info {
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */

struct i40e_dma_mem asq_mem; /* send queue dynamic memory */
struct i40e_dma_mem arq_mem; /* receive queue dynamic memory */

/* last status values on send and receive queues */
enum i40e_admin_queue_err asq_last_status;
enum i40e_admin_queue_err arq_last_status;