Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
This series contains updates to ixgbe and igb.

Alexander Duyck (13):
  ixgbe: Initialize q_vector cpu and affinity masks correctly
  ixgbe: Enable jumbo frames support w/ SR-IOV
  ixgbe: Move message handling routines into their own functions
  ixgbe: Add mailbox API version negotiation support to ixgbe PF
  igb: Split Rx timestamping into two separate functions
  igb: Do not use header split, instead receive all frames into a
    single buffer
  igb: Combine post-processing of skb into a single function
  igb: Map entire page and sync half instead of mapping and unmapping
    half pages
  igb: Move rx_buffer related code in Rx cleanup path into separate
    function
  igb: Lock buffer size at 2K even on systems with larger pages
  igb: Combine q_vector and ring allocation into a single function
  igb: Move the calls to set the Tx and Rx queues into igb_open
  igb: Split igb_update_dca into separate Tx and Rx functions

Tushar Dave (1):
  igb: Correcting and improving small packet check and padding
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Oct 20, 2012
2 parents 1b6f0f9 + 6a05004 commit 72ec301
Showing 11 changed files with 1,195 additions and 633 deletions.
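
Before the diffs: several of the igb patches in this series move the Rx path to mapping a whole page once and DMA-syncing only the half currently in use, with the buffer size locked at 2K so two buffers tile a 4K page. The following is a minimal sketch of that pattern, not the driver's own code; the struct and helper names (rx_buf, rx_buf_init, rx_buf_consume) are placeholders.

/* Sketch only: map a receive page once, then sync halves per use. */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

struct rx_buf {
	struct page *page;
	dma_addr_t dma;
	unsigned int page_offset;	/* 0 or PAGE_SIZE / 2 */
};

static int rx_buf_init(struct device *dev, struct rx_buf *buf)
{
	buf->page = alloc_page(GFP_ATOMIC);
	if (!buf->page)
		return -ENOMEM;

	/* map the entire page once, for the lifetime of the buffer */
	buf->dma = dma_map_page(dev, buf->page, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, buf->dma)) {
		__free_page(buf->page);
		return -ENOMEM;
	}
	buf->page_offset = 0;
	return 0;
}

static void rx_buf_consume(struct device *dev, struct rx_buf *buf,
			   unsigned int len)
{
	/* hand only the active half page to the CPU */
	dma_sync_single_range_for_cpu(dev, buf->dma, buf->page_offset,
				      len, DMA_FROM_DEVICE);

	/* ... attach the data to an skb here ... */

	/* return the half page to the device and flip halves */
	dma_sync_single_range_for_device(dev, buf->dma, buf->page_offset,
					 len, DMA_FROM_DEVICE);
	buf->page_offset ^= PAGE_SIZE / 2;
}

The payoff is visible in the igb_ethtool.c hunks below, where per-packet dma_unmap_single() calls become cheap dma_sync_single_for_cpu()/for_device() pairs.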
3 changes: 3 additions & 0 deletions drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -172,10 +172,13 @@ struct e1000_adv_tx_context_desc {
 #define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
 #define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
 #define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
 
 #define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
 #define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
 #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
 
 /* Additional DCA related definitions, note change in position of CPUID */
 #define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
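These new relaxed-ordering bits would typically be OR'd into the per-queue Tx DCA control register. A hedged sketch of a possible caller follows; the rd32/wr32-style accessors and the E1000_DCA_TXCTRL() register macro are assumed from the driver's usual conventions, not quoted from this merge.

/* Sketch only: enable PCIe relaxed ordering for Tx descriptor and
 * data reads on one queue.
 */
static void example_setup_tx_relax_order(struct e1000_hw *hw, int reg_idx)
{
	u32 txctrl = rd32(E1000_DCA_TXCTRL(reg_idx));

	/* relaxed ordering for descriptor and data reads; descriptor
	 * writeback relaxed ordering (TX_WB_RO_EN) is left alone here
	 */
	txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
		  E1000_DCA_TXCTRL_DATA_RRO_EN;

	wr32(E1000_DCA_TXCTRL(reg_idx), txctrl);
}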
70 changes: 42 additions & 28 deletions drivers/net/ethernet/intel/igb/igb.h
@@ -132,9 +132,10 @@ struct vf_data_storage {
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 
 /* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_256   256
-#define IGB_RXBUFFER_16384 16384
-#define IGB_RX_HDR_LEN     IGB_RXBUFFER_256
+#define IGB_RXBUFFER_256   256
+#define IGB_RXBUFFER_2048  2048
+#define IGB_RX_HDR_LEN     IGB_RXBUFFER_256
+#define IGB_RX_BUFSZ       IGB_RXBUFFER_2048
 
 /* How many Tx Descriptors do we need to call netif_wake_queue ? */
 #define IGB_TX_QUEUE_WAKE 16
@@ -174,11 +175,9 @@ struct igb_tx_buffer {
 };
 
 struct igb_rx_buffer {
-	struct sk_buff *skb;
 	dma_addr_t dma;
 	struct page *page;
-	dma_addr_t page_dma;
-	u32 page_offset;
+	unsigned int page_offset;
 };
 
 struct igb_tx_queue_stats {
@@ -205,22 +204,6 @@ struct igb_ring_container {
 	u8 itr;				/* current ITR setting for ring */
 };
 
-struct igb_q_vector {
-	struct igb_adapter *adapter;	/* backlink */
-	int cpu;			/* CPU for DCA */
-	u32 eims_value;			/* EIMS mask value */
-
-	struct igb_ring_container rx, tx;
-
-	struct napi_struct napi;
-
-	u16 itr_val;
-	u8 set_itr;
-	void __iomem *itr_register;
-
-	char name[IFNAMSIZ + 9];
-};
-
 struct igb_ring {
 	struct igb_q_vector *q_vector;	/* backlink to q_vector */
 	struct net_device *netdev;	/* back pointer to net_device */
@@ -232,15 +215,17 @@ struct igb_ring {
 	void *desc;			/* descriptor ring memory */
 	unsigned long flags;		/* ring specific flags */
 	void __iomem *tail;		/* pointer to ring tail register */
-	dma_addr_t dma;			/* phys address of the ring */
-	unsigned int size;		/* length of desc. ring in bytes */
 
 	u16 count;			/* number of desc. in the ring */
 	u8 queue_index;			/* logical index of the ring*/
 	u8 reg_idx;			/* physical index of the ring */
+	u32 size;			/* length of desc. ring in bytes */
 
 	/* everything past this point are written often */
-	u16 next_to_clean ____cacheline_aligned_in_smp;
+	u16 next_to_clean;
 	u16 next_to_use;
+	u16 next_to_alloc;
 
 	union {
 		/* TX */
@@ -251,12 +236,30 @@ struct igb_ring {
 	};
 	/* RX */
 	struct {
+		struct sk_buff *skb;
 		struct igb_rx_queue_stats rx_stats;
 		struct u64_stats_sync rx_syncp;
 	};
 	};
+	/* Items past this point are only used during ring alloc / free */
+	dma_addr_t dma;			/* phys address of the ring */
 } ____cacheline_internodealigned_in_smp;
+
+struct igb_q_vector {
+	struct igb_adapter *adapter;	/* backlink */
+	int cpu;			/* CPU for DCA */
+	u32 eims_value;			/* EIMS mask value */
+
+	u16 itr_val;
+	u8 set_itr;
+	void __iomem *itr_register;
+
+	struct igb_ring_container rx, tx;
+
+	struct napi_struct napi;
+	struct rcu_head rcu;	/* to avoid race with update stats on free */
+	char name[IFNAMSIZ + 9];
+
+	/* for dynamic allocation of rings associated with this q_vector */
+	struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
 
 enum e1000_ring_flags_t {
@@ -442,9 +445,20 @@ extern void igb_ptp_stop(struct igb_adapter *adapter);
 extern void igb_ptp_reset(struct igb_adapter *adapter);
 extern void igb_ptp_tx_work(struct work_struct *work);
 extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
-				union e1000_adv_rx_desc *rx_desc,
+extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
 				struct sk_buff *skb);
+extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+				unsigned char *va,
+				struct sk_buff *skb);
+static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
+				       union e1000_adv_rx_desc *rx_desc,
+				       struct sk_buff *skb)
+{
+	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
+	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
+		igb_ptp_rx_rgtstamp(q_vector, skb);
+}
 
 extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
 				  struct ifreq *ifr, int cmd);
 #endif /* CONFIG_IGB_PTP */
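Worth noting on the hunk above: the old extern igb_ptp_rx_hwtstamp() is split into igb_ptp_rx_pktstamp(), for timestamps the hardware prepends to the packet buffer (TSIP set), and igb_ptp_rx_rgtstamp(), for timestamps latched in device registers (TS set without TSIP); the new static inline keeps existing call sites working by filtering on those status bits. A hedged sketch of how an Rx path might drive the pair; the wrapper below is illustrative, not code from this merge.

/* Sketch only: dispatch between packet-embedded and register-held
 * Rx timestamps. example_rx_hwtstamp() is a placeholder name.
 */
static void example_rx_hwtstamp(struct igb_q_vector *q_vector,
				union e1000_adv_rx_desc *rx_desc,
				unsigned char *va, struct sk_buff *skb)
{
	/* timestamp prepended to the packet data itself */
	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
		igb_ptp_rx_pktstamp(q_vector, va, skb);

	/* timestamp latched in registers; the inline wrapper only acts
	 * when TS is set and TSIP is clear, so this is safe either way
	 */
	igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
}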
53 changes: 31 additions & 22 deletions drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -37,6 +37,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+#include <linux/highmem.h>
 
 #include "igb.h"

@@ -1685,16 +1686,24 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
 	memset(&skb->data[frame_size + 12], 0xAF, 1);
 }
 
-static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
+				  unsigned int frame_size)
 {
-	frame_size /= 2;
-	if (*(skb->data + 3) == 0xFF) {
-		if ((*(skb->data + frame_size + 10) == 0xBE) &&
-		    (*(skb->data + frame_size + 12) == 0xAF)) {
-			return 0;
-		}
-	}
-	return 13;
+	unsigned char *data;
+	bool match = true;
+
+	frame_size >>= 1;
+
+	data = kmap(rx_buffer->page);
+
+	if (data[3] != 0xFF ||
+	    data[frame_size + 10] != 0xBE ||
+	    data[frame_size + 12] != 0xAF)
+		match = false;
+
+	kunmap(rx_buffer->page);
+
+	return match;
 }
 
 static int igb_clean_test_rings(struct igb_ring *rx_ring,
@@ -1704,9 +1713,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 	union e1000_adv_rx_desc *rx_desc;
 	struct igb_rx_buffer *rx_buffer_info;
 	struct igb_tx_buffer *tx_buffer_info;
-	struct netdev_queue *txq;
 	u16 rx_ntc, tx_ntc, count = 0;
-	unsigned int total_bytes = 0, total_packets = 0;
 
 	/* initialize next to clean and descriptor values */
 	rx_ntc = rx_ring->next_to_clean;
@@ -1717,21 +1724,24 @@
 		/* check rx buffer */
 		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
 
-		/* unmap rx buffer, will be remapped by alloc_rx_buffers */
-		dma_unmap_single(rx_ring->dev,
-				 rx_buffer_info->dma,
-				 IGB_RX_HDR_LEN,
-				 DMA_FROM_DEVICE);
-		rx_buffer_info->dma = 0;
+		/* sync Rx buffer for CPU read */
+		dma_sync_single_for_cpu(rx_ring->dev,
+					rx_buffer_info->dma,
+					IGB_RX_BUFSZ,
+					DMA_FROM_DEVICE);
 
 		/* verify contents of skb */
-		if (!igb_check_lbtest_frame(rx_buffer_info->skb, size))
+		if (igb_check_lbtest_frame(rx_buffer_info, size))
 			count++;
 
+		/* sync Rx buffer for device write */
+		dma_sync_single_for_device(rx_ring->dev,
+					   rx_buffer_info->dma,
+					   IGB_RX_BUFSZ,
+					   DMA_FROM_DEVICE);
+
 		/* unmap buffer on tx side */
 		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
-		total_bytes += tx_buffer_info->bytecount;
-		total_packets += tx_buffer_info->gso_segs;
 		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 
 		/* increment rx/tx next to clean counters */
@@ -1746,8 +1756,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 		rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
 	}
 
-	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
-	netdev_tx_completed_queue(txq, total_packets, total_bytes);
+	netdev_tx_reset_queue(txring_txq(tx_ring));
 
 	/* re-map buffers to ring, store next to clean values */
 	igb_alloc_rx_buffers(rx_ring, count);
