Commit b6338d1

---
r: 154290
b: refs/heads/master
c: a620c16
h: refs/heads/master
v: v3
David Daney authored and Ralf Baechle committed Jun 24, 2009
1 parent e2b0bab commit b6338d1
Showing 5 changed files with 107 additions and 67 deletions.
2 changes: 1 addition & 1 deletion [refs]

@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f696a10838ffab85e5bc07e7cff0d0e1870a30d7
+refs/heads/master: a620c1632629b42369e78448acc7b384fe1faf48
2 changes: 2 additions & 0 deletions trunk/drivers/staging/octeon/ethernet-defines.h

@@ -117,6 +117,8 @@
 
 /* Maximum number of packets to process per interrupt. */
 #define MAX_RX_PACKETS 120
+/* Maximum number of SKBs to try to free per xmit packet. */
+#define MAX_SKB_TO_FREE 10
 #define MAX_OUT_QUEUE_DEPTH 1000
 
 #ifndef CONFIG_SMP
56 changes: 33 additions & 23 deletions trunk/drivers/staging/octeon/ethernet-tx.c

@@ -47,6 +47,7 @@
 
 #include "ethernet-defines.h"
 #include "octeon-ethernet.h"
+#include "ethernet-tx.h"
 #include "ethernet-util.h"
 
 #include "cvmx-wqe.h"
@@ -82,8 +83,10 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 	uint64_t old_scratch2;
 	int dropped;
 	int qos;
+	int queue_it_up;
 	struct octeon_ethernet *priv = netdev_priv(dev);
-	int32_t in_use;
+	int32_t skb_to_free;
+	int32_t undo;
 	int32_t buffers_to_free;
 #if REUSE_SKBUFFS_WITHOUT_FREE
 	unsigned char *fpa_head;
@@ -120,15 +123,15 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
 
 		/*
-		 * Assume we're going to be able t osend this
-		 * packet. Fetch and increment the number of pending
-		 * packets for output.
+		 * Fetch and increment the number of packets to be
+		 * freed.
 		 */
 		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
 					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
 					       0);
 		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
-					       priv->fau + qos * 4, 1);
+					       priv->fau + qos * 4,
+					       MAX_SKB_TO_FREE);
 	}
 
 	/*
@@ -286,15 +289,29 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (USE_ASYNC_IOBDMA) {
 		/* Get the number of skbuffs in use by the hardware */
 		CVMX_SYNCIOBDMA;
-		in_use = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+		skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
 		buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
 	} else {
 		/* Get the number of skbuffs in use by the hardware */
-		in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 1);
+		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
+						       MAX_SKB_TO_FREE);
 		buffers_to_free =
 		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
 	}
 
+	/*
+	 * We try to claim MAX_SKB_TO_FREE buffers. If there were not
+	 * that many available, we have to un-claim (undo) any that
+	 * were in excess. If skb_to_free is positive we will free
+	 * that many buffers.
+	 */
+	undo = skb_to_free > 0 ?
+		MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
+	if (undo > 0)
+		cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
+	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
+		MAX_SKB_TO_FREE : -skb_to_free;
+
 	/*
 	 * If we're sending faster than the receive can free them then
 	 * don't do the HW free.
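[Editor's note: the claim/undo arithmetic added above is compact enough to misread, so here is a standalone model of it. This is plain userspace C for illustration only, not driver code: fau stands in for the hardware fetch-and-add counter at priv->fau + qos * 4 (zero or negative, with -fau skbs currently eligible for freeing), and fetch_and_add is a hypothetical single-threaded stand-in for cvmx_fau_fetch_and_add32.]

#include <assert.h>
#include <stdio.h>

#define MAX_SKB_TO_FREE 10

static int fau;	/* models the FAU register; zero or negative when idle */

static int fetch_and_add(int *reg, int value)
{
	int old = *reg;
	*reg += value;
	return old;
}

/* The same three steps as in cvm_oct_xmit() above. */
static int claim_skbs_to_free(void)
{
	int skb_to_free, undo;

	/* Optimistically claim a full batch... */
	skb_to_free = fetch_and_add(&fau, MAX_SKB_TO_FREE);
	/* ...then give back the part of the claim that was not there. */
	undo = skb_to_free > 0 ?
		MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
	if (undo > 0)
		fetch_and_add(&fau, -undo);
	/* Clamp to the number we are actually allowed to free. */
	return -skb_to_free > MAX_SKB_TO_FREE ?
		MAX_SKB_TO_FREE : -skb_to_free;
}

int main(void)
{
	int start, freed;

	for (start = -25; start <= 0; start += 5) {
		fau = start;
		freed = claim_skbs_to_free();
		/* We free min(-start, MAX_SKB_TO_FREE)... */
		assert(freed == (-start < MAX_SKB_TO_FREE ?
				 -start : MAX_SKB_TO_FREE));
		/* ...and the counter advances by exactly that much. */
		assert(fau == start + freed);
		printf("start=%4d freed=%2d fau=%4d\n", start, freed, fau);
	}
	return 0;
}

Whatever the starting value, the counter ends up advanced by exactly the number of skbs freed, which is the invariant that lets several callers share it.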
@@ -330,38 +347,31 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
 	}
 
+	queue_it_up = 0;
 	if (unlikely(dropped)) {
 		dev_kfree_skb_any(skb);
-		cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
 		priv->stats.tx_dropped++;
 	} else {
 		if (USE_SKBUFFS_IN_HW) {
 			/* Put this packet on the queue to be freed later */
 			if (pko_command.s.dontfree)
-				skb_queue_tail(&priv->tx_free_list[qos], skb);
-			else {
+				queue_it_up = 1;
+			else
 				cvmx_fau_atomic_add32
 					(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
-				cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
-			}
 		} else {
 			/* Put this packet on the queue to be freed later */
-			skb_queue_tail(&priv->tx_free_list[qos], skb);
+			queue_it_up = 1;
 		}
 	}
 
-	/* Free skbuffs not in use by the hardware, possibly two at a time */
-	if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) {
+	if (queue_it_up) {
 		spin_lock(&priv->tx_free_list[qos].lock);
-		/*
-		 * Check again now that we have the lock. It might
-		 * have changed.
-		 */
-		if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
-			dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
-		if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
-			dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
+		__skb_queue_tail(&priv->tx_free_list[qos], skb);
+		cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 0);
 		spin_unlock(&priv->tx_free_list[qos].lock);
+	} else {
+		cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
 	}
 
 	return 0;
25 changes: 25 additions & 0 deletions trunk/drivers/staging/octeon/ethernet-tx.h

@@ -30,3 +30,28 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
 int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
 			 int do_free, int qos);
 void cvm_oct_tx_shutdown(struct net_device *dev);
+
+/**
+ * Free dead transmit skbs.
+ *
+ * @priv:        The driver data
+ * @skb_to_free: The number of SKBs to free (free none if negative).
+ * @qos:         The queue to free from.
+ * @take_lock:   If true, acquire the skb list lock.
+ */
+static inline void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv,
+					int skb_to_free,
+					int qos, int take_lock)
+{
+	/* Free skbuffs not in use by the hardware. */
+	if (skb_to_free > 0) {
+		if (take_lock)
+			spin_lock(&priv->tx_free_list[qos].lock);
+		while (skb_to_free > 0) {
+			dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
+			skb_to_free--;
+		}
+		if (take_lock)
+			spin_unlock(&priv->tx_free_list[qos].lock);
+	}
+}
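[Editor's note: cvm_oct_free_tx_skbs only pops and frees; the interesting property is that the batch it is handed can never overlap a batch claimed by another CPU running cvm_oct_xmit or the cleanup timer at the same time. The sketch below probes that with C11 atomics and two racing threads. It is a userspace model under assumed semantics, not driver code: claim_batch repeats the claim/undo sequence from cvm_oct_xmit above, and AVAILABLE is an invented starting depth. Build with cc -pthread.]

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define MAX_SKB_TO_FREE 10
#define AVAILABLE 33	/* invented: the counter starts at -AVAILABLE */

static atomic_int fau = -AVAILABLE;	/* models priv->fau + qos * 4 */
static atomic_int freed_total;

/* One claim/undo round, as in cvm_oct_xmit() above. */
static int claim_batch(void)
{
	int skb_to_free, undo;

	skb_to_free = atomic_fetch_add(&fau, MAX_SKB_TO_FREE);
	undo = skb_to_free > 0 ?
		MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
	if (undo > 0)
		atomic_fetch_add(&fau, -undo);
	return -skb_to_free > MAX_SKB_TO_FREE ?
		MAX_SKB_TO_FREE : -skb_to_free;
}

static void *claimer(void *arg)
{
	int i, n;

	(void)arg;
	for (i = 0; i < 1000; i++) {
		n = claim_batch();
		if (n > 0)
			atomic_fetch_add(&freed_total, n);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	int n;

	pthread_create(&a, NULL, claimer, NULL);
	pthread_create(&b, NULL, claimer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Single-threaded now: drain anything the race left behind. */
	while ((n = claim_batch()) > 0)
		atomic_fetch_add(&freed_total, n);

	/* Nothing was freed twice and nothing leaked. */
	assert(atomic_load(&freed_total) == AVAILABLE);
	assert(atomic_load(&fau) == 0);
	printf("freed %d of %d\n", atomic_load(&freed_total), AVAILABLE);
	return 0;
}

An in-flight claim can make the counter read high to other CPUs, so a concurrent caller may under-free for a moment, but it can never free a buffer someone else already claimed; any leftover is picked up on a later pass (in the driver, by the cvm_do_timer() sweep in ethernet.c below).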
89 changes: 46 additions & 43 deletions trunk/drivers/staging/octeon/ethernet.c

@@ -37,13 +37,14 @@
 #include <asm/octeon/octeon.h>
 
 #include "ethernet-defines.h"
+#include "octeon-ethernet.h"
 #include "ethernet-mem.h"
 #include "ethernet-rx.h"
+#include "ethernet-tx.h"
 #include "ethernet-mdio.h"
 #include "ethernet-util.h"
 #include "ethernet-proc.h"
-#include "octeon-ethernet.h"
 
 
 #include "cvmx-pip.h"
 #include "cvmx-pko.h"
@@ -130,53 +131,55 @@ extern struct semaphore mdio_sem;
  */
 static void cvm_do_timer(unsigned long arg)
 {
+	int32_t skb_to_free, undo;
+	int queues_per_port;
+	int qos;
+	struct octeon_ethernet *priv;
 	static int port;
-	if (port < CVMX_PIP_NUM_INPUT_PORTS) {
-		if (cvm_oct_device[port]) {
-			int queues_per_port;
-			int qos;
-			struct octeon_ethernet *priv =
-				netdev_priv(cvm_oct_device[port]);
-			if (priv->poll) {
-				/* skip polling if we don't get the lock */
-				if (!down_trylock(&mdio_sem)) {
-					priv->poll(cvm_oct_device[port]);
-					up(&mdio_sem);
-				}
-			}
-
-			queues_per_port = cvmx_pko_get_num_queues(port);
-			/* Drain any pending packets in the free list */
-			for (qos = 0; qos < queues_per_port; qos++) {
-				if (skb_queue_len(&priv->tx_free_list[qos])) {
-					spin_lock(&priv->tx_free_list[qos].
-						  lock);
-					while (skb_queue_len
-					       (&priv->tx_free_list[qos]) >
-					       cvmx_fau_fetch_and_add32(priv->
-									fau +
-									qos * 4,
-									0))
-						dev_kfree_skb(__skb_dequeue
-							      (&priv->
-							       tx_free_list
-							       [qos]));
-					spin_unlock(&priv->tx_free_list[qos].
-						    lock);
-				}
-			}
-			cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);
-		}
-		port++;
-		/* Poll the next port in a 50th of a second.
-		   This spreads the polling of ports out a little bit */
-		mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
-	} else {
-		port = 0;
-		/* All ports have been polled. Start the next iteration through
-		   the ports in one second */
-		mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
+	if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
+		/*
+		 * All ports have been polled. Start the next
+		 * iteration through the ports in one second.
+		 */
+		port = 0;
+		mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
+		return;
 	}
+	if (!cvm_oct_device[port])
+		goto out;
+
+	priv = netdev_priv(cvm_oct_device[port]);
+	if (priv->poll) {
+		/* skip polling if we don't get the lock */
+		if (!down_trylock(&mdio_sem)) {
+			priv->poll(cvm_oct_device[port]);
+			up(&mdio_sem);
+		}
+	}
+
+	queues_per_port = cvmx_pko_get_num_queues(port);
+	/* Drain any pending packets in the free list */
+	for (qos = 0; qos < queues_per_port; qos++) {
+		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
+			continue;
+		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
+						       MAX_SKB_TO_FREE);
+		undo = skb_to_free > 0 ?
+			MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
+		if (undo > 0)
+			cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
+		skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
+			MAX_SKB_TO_FREE : -skb_to_free;
+		cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
+	}
+	cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);
+
+out:
+	port++;
+	/* Poll the next port in a 50th of a second.
+	   This spreads the polling of ports out a little bit */
+	mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
 }
 
 /**
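[Editor's note: a side note on the schedule itself. Each timer firing services one port and re-arms the timer a 50th of a second out; after the last port the sweep pauses a full second before restarting, so cleanup work is spread across ports rather than done in one burst. Below is a rough userspace model of that cadence, illustration only: sleeps stand in for mod_timer, and NUM_PORTS is an invented stand-in for CVMX_PIP_NUM_INPUT_PORTS.]

#include <stdio.h>
#include <time.h>

#define NUM_PORTS 8	/* invented stand-in for CVMX_PIP_NUM_INPUT_PORTS */

static void sleep_ms(long ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

	nanosleep(&ts, NULL);
}

int main(void)
{
	static int port;	/* persists across "timer" firings */
	int sweeps = 0;

	while (sweeps < 2) {
		if (port >= NUM_PORTS) {
			/* All ports polled: next sweep in one second. */
			port = 0;
			sweeps++;
			sleep_ms(1000);
			continue;
		}
		/* The driver would drain tx_free_list[*] for this port. */
		printf("polling port %d\n", port);
		port++;
		sleep_ms(1000 / 50);	/* next port in a 50th of a second */
	}
	return 0;
}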
