Commit 6cca1c7

---
r: 225223
b: refs/heads/master
c: 895950c
h: refs/heads/master
i:
  225221: cfe2f81
  225219: 71d5adb
  225215: 59d2f1e
v: v3
Joe Perches committed Dec 21, 2010
1 parent 596ebfe commit 6cca1c7
Showing 15 changed files with 297 additions and 314 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 4b4f54670859074cf7670cc6fa96fe34a65846d9
+refs/heads/master: 895950c2a6565d9eefda4a38b00fa28537e39fcb
2 changes: 2 additions & 0 deletions trunk/drivers/net/bnx2.c
@@ -8393,6 +8393,8 @@ bnx2_remove_one(struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct bnx2 *bp = netdev_priv(dev);
 
+	cancel_work_sync(&bp->reset_task);
+
 	unregister_netdev(dev);
 
 	if (bp->mips_firmware)
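
The ordering in this hunk is the point of the change: the reset task can touch the
net_device, so it must be finished and unqueued before unregister_netdev() starts
tearing the interface down. A minimal sketch of the same remove-path pattern
(hypothetical foo_* names, not bnx2's actual code):

	#include <linux/pci.h>
	#include <linux/netdevice.h>
	#include <linux/workqueue.h>

	struct foo_priv {
		struct work_struct reset_task;	/* may touch the net_device */
	};

	static void foo_remove_one(struct pci_dev *pdev)
	{
		struct net_device *dev = pci_get_drvdata(pdev);
		struct foo_priv *priv = netdev_priv(dev);

		/* Wait for queued or running reset work to finish before
		 * the device it operates on goes away. */
		cancel_work_sync(&priv->reset_task);

		unregister_netdev(dev);
		free_netdev(dev);
	}

Had the cancel come after unregister_netdev(), a reset firing in that window could
run against a half-torn-down device.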
3 changes: 3 additions & 0 deletions trunk/drivers/net/bnx2x/bnx2x_link.c
@@ -354,6 +354,9 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
 	struct bnx2x *bp = params->bp;
 	u32 val = 0;
 
+	if ((1 < strict_cos) && (NULL == params))
+		return -EINVAL;
+
 	DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
 	/**
 	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
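
A note on the new guard: a NULL test only protects anything if it runs before the
first dereference of the pointer it checks, and here params->bp is read two lines
earlier. A general-pattern sketch of argument validation, with a hypothetical
ets_check_args() helper rather than the driver's code:

	#include <linux/errno.h>
	#include <linux/types.h>

	struct link_params;	/* opaque here; no fields needed */

	static int ets_check_args(const struct link_params *params, u8 strict_cos)
	{
		/* Validate before the first dereference, otherwise the
		 * check can never fire. */
		if (params == NULL || strict_cos > 1)
			return -EINVAL;
		return 0;
	}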
24 changes: 10 additions & 14 deletions trunk/drivers/net/sfc/efx.c
@@ -461,6 +461,9 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
 		}
 	}
 
+	spin_lock_init(&channel->tx_stop_lock);
+	atomic_set(&channel->tx_stop_count, 1);
+
 	rx_queue = &channel->rx_queue;
 	rx_queue->efx = efx;
 	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
@@ -1403,11 +1406,11 @@ static void efx_start_all(struct efx_nic *efx)
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
 
-	if (efx_dev_registered(efx))
-		netif_tx_wake_all_queues(efx->net_dev);
-
-	efx_for_each_channel(channel, efx)
+	efx_for_each_channel(channel, efx) {
+		if (efx_dev_registered(efx))
+			efx_wake_queue(channel);
 		efx_start_channel(channel);
+	}
 
 	if (efx->legacy_irq)
 		efx->legacy_irq_enabled = true;
@@ -1495,7 +1498,9 @@ static void efx_stop_all(struct efx_nic *efx)
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
 	if (efx_dev_registered(efx)) {
-		netif_tx_stop_all_queues(efx->net_dev);
+		struct efx_channel *channel;
+		efx_for_each_channel(channel, efx)
+			efx_stop_queue(channel);
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -1891,7 +1896,6 @@ static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
 static int efx_register_netdev(struct efx_nic *efx)
 {
 	struct net_device *net_dev = efx->net_dev;
-	struct efx_channel *channel;
 	int rc;
 
 	net_dev->watchdog_timeo = 5 * HZ;
@@ -1914,14 +1918,6 @@ static int efx_register_netdev(struct efx_nic *efx)
 	if (rc)
 		goto fail_locked;
 
-	efx_for_each_channel(channel, efx) {
-		struct efx_tx_queue *tx_queue;
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			tx_queue->core_txq = netdev_get_tx_queue(
-				efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
-		}
-	}
-
 	/* Always start with carrier off; PHY events will detect the link */
 	netif_carrier_off(efx->net_dev);
 
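
The efx_stop_all() hunk above pairs the per-channel stops with an empty
netif_tx_lock_bh()/netif_tx_unlock_bh() critical section. Transmit paths run under
the TX lock, so cycling the lock once acts as a drain: any sender that raced with
the stop has finished by the time the unlock returns. A minimal sketch of the idiom
(hypothetical my_* names; assumes a non-LLTX driver, as sfc is):

	#include <linux/netdevice.h>

	static void my_quiesce_tx(struct net_device *net_dev)
	{
		netif_tx_stop_all_queues(net_dev); /* stand-in for per-channel stops */

		/* Senders already inside ndo_start_xmit hold the TX lock;
		 * taking and releasing it once waits them out. */
		netif_tx_lock_bh(net_dev);
		netif_tx_unlock_bh(net_dev);
	}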
2 changes: 2 additions & 0 deletions trunk/drivers/net/sfc/efx.h
@@ -36,6 +36,8 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+extern void efx_stop_queue(struct efx_channel *channel);
+extern void efx_wake_queue(struct efx_channel *channel);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
13 changes: 11 additions & 2 deletions trunk/drivers/net/sfc/net_driver.h
@@ -136,7 +136,6 @@ struct efx_tx_buffer {
  * @efx: The associated Efx NIC
  * @queue: DMA queue number
  * @channel: The associated channel
- * @core_txq: The networking core TX queue structure
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
@@ -149,6 +148,8 @@
  *	variable indicates that the queue is empty. This is to
  *	avoid cache-line ping-pong between the xmit path and the
  *	completion path.
+ * @stopped: Stopped count.
+ *	Set if this TX queue is currently stopping its port.
  * @insert_count: Current insert pointer
  *	This is the number of buffers that have been added to the
  *	software ring.
@@ -178,7 +179,7 @@ struct efx_tx_queue {
 	struct efx_nic *efx ____cacheline_aligned_in_smp;
 	unsigned queue;
 	struct efx_channel *channel;
-	struct netdev_queue *core_txq;
+	struct efx_nic *nic;
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
 	unsigned int ptr_mask;
@@ -187,6 +188,7 @@
 	/* Members used mainly on the completion path */
 	unsigned int read_count ____cacheline_aligned_in_smp;
 	unsigned int old_write_count;
+	int stopped;
 
 	/* Members used only on the xmit path */
 	unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -319,6 +321,7 @@ enum efx_rx_alloc_method {
  * @irq_moderation: IRQ moderation value (in hardware ticks)
  * @napi_dev: Net device used with NAPI
  * @napi_str: NAPI control structure
+ * @reset_work: Scheduled reset work thread
  * @work_pending: Is work pending via NAPI?
  * @eventq: Event queue buffer
  * @eventq_mask: Event queue pointer mask
@@ -339,6 +342,8 @@
  * @n_rx_overlength: Count of RX_OVERLENGTH errors
  * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
  * @rx_queue: RX queue for this channel
+ * @tx_stop_count: Core TX queue stop count
+ * @tx_stop_lock: Core TX queue stop lock
  * @tx_queue: TX queues for this channel
  */
 struct efx_channel {
@@ -377,6 +382,10 @@ struct efx_channel {
 	bool rx_pkt_csummed;
 
 	struct efx_rx_queue rx_queue;
+
+	atomic_t tx_stop_count;
+	spinlock_t tx_stop_lock;
+
 	struct efx_tx_queue tx_queue[2];
 };
 
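
Note from the efx.c hunk above that tx_stop_count is initialised to 1 in
efx_alloc_channel(), so a channel starts out gated and only transmits after
efx_start_all() issues the matching wake. The count-plus-spinlock pair implements a
nestable stop. A sketch of the pattern under simplified locking (hypothetical
tx_gate names; the driver itself uses local_bh_disable() around the wake):

	#include <linux/atomic.h>
	#include <linux/spinlock.h>

	struct tx_gate {
		atomic_t stop_count;	/* > 0 means the queue is stopped */
		spinlock_t lock;	/* serialises stop vs. the final wake */
	};

	static void gate_stop(struct tx_gate *g)
	{
		spin_lock_bh(&g->lock);
		atomic_inc(&g->stop_count);
		/* ... netif_tx_stop_queue() on the core queue ... */
		spin_unlock_bh(&g->lock);
	}

	static void gate_wake(struct tx_gate *g)
	{
		/* atomic_dec_and_lock() takes the lock only when the count
		 * hits zero, so a concurrent gate_stop() cannot slip in
		 * between the final decrement and the wake. */
		if (atomic_dec_and_lock(&g->stop_count, &g->lock)) {
			/* ... netif_tx_wake_queue() on the core queue ... */
			spin_unlock(&g->lock);
		}
	}

Stops therefore nest: two gate_stop() calls need two gate_wake() calls before the
queue runs again.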
111 changes: 92 additions & 19 deletions trunk/drivers/net/sfc/tx.c
@@ -30,6 +30,50 @@
  */
 #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
 
+/* We need to be able to nest calls to netif_tx_stop_queue(), partly
+ * because of the 2 hardware queues associated with each core queue,
+ * but also so that we can inhibit TX for reasons other than a full
+ * hardware queue. */
+void efx_stop_queue(struct efx_channel *channel)
+{
+	struct efx_nic *efx = channel->efx;
+	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
+
+	if (!tx_queue)
+		return;
+
+	spin_lock_bh(&channel->tx_stop_lock);
+	netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
+
+	atomic_inc(&channel->tx_stop_count);
+	netif_tx_stop_queue(
+		netdev_get_tx_queue(efx->net_dev,
+				    tx_queue->queue / EFX_TXQ_TYPES));
+
+	spin_unlock_bh(&channel->tx_stop_lock);
+}
+
+/* Decrement core TX queue stop count and wake it if the count is 0 */
+void efx_wake_queue(struct efx_channel *channel)
+{
+	struct efx_nic *efx = channel->efx;
+	struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
+
+	if (!tx_queue)
+		return;
+
+	local_bh_disable();
+	if (atomic_dec_and_lock(&channel->tx_stop_count,
+				&channel->tx_stop_lock)) {
+		netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
+		netif_tx_wake_queue(
+			netdev_get_tx_queue(efx->net_dev,
+					    tx_queue->queue / EFX_TXQ_TYPES));
+		spin_unlock(&channel->tx_stop_lock);
+	}
+	local_bh_enable();
+}
+
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
 			       struct efx_tx_buffer *buffer)
 {
@@ -190,22 +234,20 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 			 * checked.  Update the xmit path's
 			 * copy of read_count.
 			 */
-			netif_tx_stop_queue(tx_queue->core_txq);
+			++tx_queue->stopped;
 			/* This memory barrier protects the
-			 * change of queue state from the access
+			 * change of stopped from the access
 			 * of read_count. */
 			smp_mb();
 			tx_queue->old_read_count =
 				ACCESS_ONCE(tx_queue->read_count);
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
 			q_space = efx->txq_entries - 1 - fill_level;
-			if (unlikely(q_space-- <= 0)) {
-				rc = NETDEV_TX_BUSY;
-				goto unwind;
-			}
+			if (unlikely(q_space-- <= 0))
+				goto stop;
 			smp_mb();
-			netif_tx_start_queue(tx_queue->core_txq);
+			--tx_queue->stopped;
 		}
 
 		insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
Expand Down Expand Up @@ -265,6 +307,13 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)

/* Mark the packet as transmitted, and free the SKB ourselves */
dev_kfree_skb_any(skb);
goto unwind;

stop:
rc = NETDEV_TX_BUSY;

if (tx_queue->stopped == 1)
efx_stop_queue(tx_queue->channel);

unwind:
/* Work backwards until we hit the original insert pointer value */
@@ -351,21 +400,32 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
+	struct netdev_queue *queue;
 
 	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
 
 	efx_dequeue_buffers(tx_queue, index);
 
 	/* See if we need to restart the netif queue.  This barrier
-	 * separates the update of read_count from the test of the
-	 * queue state. */
+	 * separates the update of read_count from the test of
+	 * stopped. */
 	smp_mb();
-	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
-	    likely(efx->port_enabled)) {
+	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
 		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
-			netif_tx_wake_queue(tx_queue->core_txq);
+
+			/* Do this under netif_tx_lock(), to avoid racing
+			 * with efx_xmit(). */
+			queue = netdev_get_tx_queue(
+				efx->net_dev,
+				tx_queue->queue / EFX_TXQ_TYPES);
+			__netif_tx_lock(queue, smp_processor_id());
+			if (tx_queue->stopped) {
+				tx_queue->stopped = 0;
+				efx_wake_queue(tx_queue->channel);
+			}
+			__netif_tx_unlock(queue);
 		}
 	}
 
@@ -427,6 +487,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 	tx_queue->read_count = 0;
 	tx_queue->old_read_count = 0;
 	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
+	BUG_ON(tx_queue->stopped);
 
 	/* Set up TX descriptor ring */
 	efx_nic_init_tx(tx_queue);
@@ -462,6 +523,12 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 
 	/* Free up TSO header cache */
 	efx_fini_tso(tx_queue);
+
+	/* Release queue's stop on port, if any */
+	if (tx_queue->stopped) {
+		tx_queue->stopped = 0;
+		efx_wake_queue(tx_queue->channel);
+	}
 }
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
@@ -703,9 +770,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 		 * since the xmit path last checked.  Update
 		 * the xmit path's copy of read_count.
 		 */
-		netif_tx_stop_queue(tx_queue->core_txq);
+		++tx_queue->stopped;
 		/* This memory barrier protects the change of
-		 * queue state from the access of read_count. */
+		 * stopped from the access of read_count. */
 		smp_mb();
 		tx_queue->old_read_count =
 			ACCESS_ONCE(tx_queue->read_count);
@@ -717,7 +784,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 			return 1;
 		}
 		smp_mb();
-		netif_tx_start_queue(tx_queue->core_txq);
+		--tx_queue->stopped;
 	}
 
 	insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
@@ -1057,10 +1124,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 
 	while (1) {
 		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
-		if (unlikely(rc)) {
-			rc2 = NETDEV_TX_BUSY;
-			goto unwind;
-		}
+		if (unlikely(rc))
+			goto stop;
 
 		/* Move onto the next fragment? */
 		if (state.in_len == 0) {
@@ -1089,6 +1154,14 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 	netif_err(efx, tx_err, efx->net_dev,
 		  "Out of memory for TSO headers, or PCI mapping error\n");
 	dev_kfree_skb_any(skb);
+	goto unwind;
+
+ stop:
+	rc2 = NETDEV_TX_BUSY;
+
+	/* Stop the queue if it wasn't stopped before. */
+	if (tx_queue->stopped == 1)
+		efx_stop_queue(tx_queue->channel);
 
 unwind:
 	/* Free the DMA mapping we were in the process of writing out */
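
The ++stopped / smp_mb() / recheck sequence in efx_enqueue_skb() and
efx_tx_queue_insert() is a lost-wakeup guard: the xmit path publishes "stopped"
before re-reading read_count, while efx_xmit_done() updates read_count before
testing "stopped", so at least one side always observes the other. A stripped-down
sketch of the two halves (hypothetical my_txq type; smp_mb() and ACCESS_ONCE()
come from the usual kernel headers):

	#include <linux/compiler.h>	/* ACCESS_ONCE() */
	#include <linux/types.h>	/* bool */

	struct my_txq {
		unsigned int insert_count;	/* advanced by the xmit path */
		unsigned int read_count;	/* advanced by the completion path */
		unsigned int max_fill;
		int stopped;
	};

	/* Xmit path: the ring looked full. Mark the queue stopped, then
	 * re-check, so a completion racing with the stop is never lost. */
	static bool my_txq_try_stop(struct my_txq *q)
	{
		++q->stopped;
		smp_mb();	/* stopped store before read_count load */
		if (q->insert_count - ACCESS_ONCE(q->read_count) < q->max_fill) {
			--q->stopped;	/* space freed meanwhile: keep going */
			return false;
		}
		return true;	/* genuinely full: report NETDEV_TX_BUSY */
	}

	/* Completion path: mirror-image ordering. */
	static bool my_txq_should_wake(struct my_txq *q, unsigned int done)
	{
		q->read_count += done;
		smp_mb();	/* read_count store before stopped load */
		return q->stopped &&
		       q->insert_count - q->read_count < q->max_fill;
	}

Because the two orderings mirror each other, a completion that drains the ring
either sees stopped != 0 and wakes the queue, or the xmit path's re-check sees the
new read_count and un-stops itself; a packet can never strand with the queue off.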
(Diff truncated: the remaining 8 of 15 changed files are not shown.)
