Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2017-10-09

This series contains updates to ixgbe only.

Emil fixes an issue where the semaphore bits could be stuck after a reset
or a crash, by clearing the software resource bits in the
software/firmware synchronization register.  He also adds error checks when
we attempt to identify and initialize the PHY, to prevent a crash, and
fixes a few issues in the logic of ixgbe_clean_test_rings() which were
exposed by a previous commit and were causing a crash in ethtool
diagnostics.

Bhumika Goyal fixes a couple of instances which were overlooked when we
made ixgbe_mac_operations constant.

Shannon Nelson fixes an issue so that normal operation is restored after
the last MACVLAN offload is removed; otherwise we would get stuck in
single-queue operation.

The infamous Jesper Dangaard Brouer adds a counter which counts the
number of times page recycling fails and the real page allocator is
invoked.

Alex updates the adaptive ITR algorithm to better support the needs of the
network.  The intent is for the ITR algorithm to prevent either starving a
socket buffer for memory on transmit or overrunning a socket buffer on
receive.  We should also work better with new features like XDP, which can
handle small packets at high rates, without being locked into NAPI polling
mode.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Oct 9, 2017
2 parents 2e997d8 + b64666a commit 0349a86
Showing 7 changed files with 259 additions and 89 deletions.
9 changes: 9 additions & 0 deletions drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -275,6 +275,7 @@ struct ixgbe_rx_queue_stats {
u64 rsc_count;
u64 rsc_flush;
u64 non_eop_descs;
u64 alloc_rx_page;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
u64 csum_err;
@@ -434,8 +435,15 @@ static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
}
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))

#define IXGBE_ITR_ADAPTIVE_MIN_INC 2
#define IXGBE_ITR_ADAPTIVE_MIN_USECS 10
#define IXGBE_ITR_ADAPTIVE_MAX_USECS 126
#define IXGBE_ITR_ADAPTIVE_LATENCY 0x80
#define IXGBE_ITR_ADAPTIVE_BULK 0x00

struct ixgbe_ring_container {
struct ixgbe_ring *ring; /* pointer to linked list of rings */
unsigned long next_update; /* jiffies value of last update */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 work_limit; /* total work allowed per interrupt */
@@ -655,6 +663,7 @@ struct ixgbe_adapter {
u64 rsc_total_count;
u64 rsc_total_flush;
u64 non_eop_descs;
u32 alloc_rx_page;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
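The alloc_rx_page counter added above tracks how often the page-recycling
path fails and the driver has to fall back to the real page allocator; the
ixgbe.h hunk adds the field both to the per-queue Rx stats and to the
adapter-wide totals, which is what the new "alloc_rx_page" ethtool string
reports.  The sketch below only illustrates where such a counter would be
bumped; the structure and function names are hypothetical stand-ins, not
the ixgbe code.

#include <linux/skbuff.h>
#include <linux/types.h>

/* Hypothetical stand-ins for the ring statistics and Rx buffer types */
struct demo_rx_stats {
	u64 alloc_rx_page;		/* pages obtained from the allocator */
	u64 alloc_rx_page_failed;	/* allocation attempts that failed */
};

struct demo_rx_buffer {
	struct page *page;		/* NULL when recycling did not work */
};

static bool demo_alloc_mapped_page(struct demo_rx_stats *stats,
				   struct demo_rx_buffer *bi)
{
	if (likely(bi->page))		/* recycled page is still usable */
		return true;

	bi->page = dev_alloc_pages(0);	/* fall back to the page allocator */
	if (unlikely(!bi->page)) {
		stats->alloc_rx_page_failed++;
		return false;
	}

	stats->alloc_rx_page++;		/* the counter added by this series */
	return true;
}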

8 changes: 4 additions & 4 deletions drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3800,10 +3800,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
fw_cmd.ver_build = build;
fw_cmd.ver_sub = sub;
fw_cmd.hdr.checksum = 0;
fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
fw_cmd.pad = 0;
fw_cmd.pad2 = 0;
fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));

for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
@@ -4100,8 +4100,8 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw)
return false;

fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
fwsm &= IXGBE_FWSM_MODE_MASK;
return fwsm == IXGBE_FWSM_FW_MODE_PT;

return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
}

/**
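Two behavioral details are visible in this file's diff: the driver-version
command now zeroes its pad bytes before the checksum over the command
header is computed, so the checksum no longer covers uninitialized padding,
and ixgbe_mng_present() now tests the pass-through (PT) bit directly
instead of requiring the masked mode field to equal it exactly.  A minimal
sketch of the second point, using assumed mask values rather than the
driver's real IXGBE_FWSM_* definitions:

#include <linux/types.h>

#define DEMO_FWSM_MODE_MASK	0x0EU	/* assumed firmware mode field */
#define DEMO_FWSM_FW_MODE_PT	0x04U	/* assumed pass-through mode bit */

static bool demo_mng_present(u32 fwsm)
{
	/* old form: the whole mode field had to equal PT exactly */
	/* return (fwsm & DEMO_FWSM_MODE_MASK) == DEMO_FWSM_FW_MODE_PT; */

	/* new form: any value with the PT bit set counts as present */
	return !!(fwsm & DEMO_FWSM_FW_MODE_PT);
}

With these assumed values, an fwsm of 0x0C (PT plus another mode bit)
fails the old test but passes the new one.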
54 changes: 35 additions & 19 deletions drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -104,6 +104,7 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
{"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
@@ -1916,16 +1917,45 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
unsigned int size)
{
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
struct ixgbe_tx_buffer *tx_buffer;
u16 rx_ntc, tx_ntc, count = 0;

/* initialize next to clean and descriptor values */
rx_ntc = rx_ring->next_to_clean;
tx_ntc = tx_ring->next_to_clean;
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

while (tx_ntc != tx_ring->next_to_use) {
union ixgbe_adv_tx_desc *tx_desc;
struct ixgbe_tx_buffer *tx_buffer;

tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);

/* if DD is not set transmit has not completed */
if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
return count;

/* unmap buffer on Tx side */
tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];

/* Free all the Tx ring sk_buffs */
dev_kfree_skb_any(tx_buffer->skb);

/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);

/* increment Tx next to clean counter */
tx_ntc++;
if (tx_ntc == tx_ring->count)
tx_ntc = 0;
}

while (rx_desc->wb.upper.length) {
struct ixgbe_rx_buffer *rx_buffer;

/* check Rx buffer */
rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

@@ -1938,33 +1968,19 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
/* verify contents of skb */
if (ixgbe_check_lbtest_frame(rx_buffer, size))
count++;
else
break;

/* sync Rx buffer for device write */
dma_sync_single_for_device(rx_ring->dev,
rx_buffer->dma,
ixgbe_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);

/* unmap buffer on Tx side */
tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];

/* Free all the Tx ring sk_buffs */
dev_kfree_skb_any(tx_buffer->skb);

/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);

/* increment Rx/Tx next to clean counters */
/* increment Rx next to clean counter */
rx_ntc++;
if (rx_ntc == rx_ring->count)
rx_ntc = 0;
tx_ntc++;
if (tx_ntc == tx_ring->count)
tx_ntc = 0;

/* fetch next descriptor */
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
11 changes: 9 additions & 2 deletions drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -806,6 +806,7 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring,
ring->next = head->ring;
head->ring = ring;
head->count++;
head->next_update = jiffies + 1;
}

/**
@@ -879,8 +880,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* initialize work limits */
q_vector->tx.work_limit = adapter->tx_work_limit;

/* initialize pointer to rings */
ring = q_vector->ring;
/* Initialize setting for adaptive ITR */
q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
IXGBE_ITR_ADAPTIVE_LATENCY;
q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
IXGBE_ITR_ADAPTIVE_LATENCY;

/* initialize ITR */
if (txr_count && !rxr_count) {
@@ -897,6 +901,9 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
q_vector->itr = adapter->rx_itr_setting;
}

/* initialize pointer to rings */
ring = q_vector->ring;

while (txr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
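
The initial tx.itr/rx.itr values set above pack two pieces of information
into one field: the interrupt interval in microseconds and a
latency-versus-bulk hint, using the IXGBE_ITR_ADAPTIVE_* defines introduced
in the ixgbe.h hunk (IXGBE_ITR_ADAPTIVE_BULK is 0x00, so bulk mode is
simply the absence of the latency bit).  Below is a minimal sketch of that
encoding, assuming the latency flag can be masked off to recover the
microsecond value; the driver's actual adaptive update routine is
considerably more involved.

#include <linux/types.h>

/* Values copied from the defines added in ixgbe.h above */
#define DEMO_ITR_ADAPTIVE_MIN_USECS	10
#define DEMO_ITR_ADAPTIVE_MAX_USECS	126
#define DEMO_ITR_ADAPTIVE_LATENCY	0x80
#define DEMO_ITR_ADAPTIVE_BULK		0x00

/* Sketch only: split a packed adaptive ITR value into its two parts. */
static void demo_decode_itr(u16 itr, u16 *usecs, bool *latency)
{
	*latency = !!(itr & DEMO_ITR_ADAPTIVE_LATENCY);
	*usecs   = itr & ~DEMO_ITR_ADAPTIVE_LATENCY;
}

/* The initial value used above, MAX_USECS | LATENCY, decodes to
 * 126 usecs with the latency hint set. */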