[NET]: Eliminate netif_rx massive packet drops.
Eliminate the throttling behaviour when the netif receive queue fills,
because it behaves badly on high-speed networks under load. The
throttling causes bursts of packet drops, which in turn force TCP back
into slow start. Effectively the same patch has been carried in the
BIC TCP and H-TCP patch sets, as well as in Web100.

The existing code drops hundreds of packets once the queue fills;
this change switches to dropping individual packets (drop-tail).

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stephen Hemminger authored and David S. Miller committed Jun 24, 2005
1 parent 34008d8 commit 31aa02c
Showing 2 changed files with 3 additions and 22 deletions.
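
The behaviour change is easiest to see side by side. Below is a small, purely
illustrative user-space model of the two overflow policies, not kernel code:
the pre-patch "throttle" policy latches a drop state once the per-CPU backlog
exceeds the limit and keeps dropping until the queue has drained empty, while
the post-patch drop-tail policy drops only the packet that would overflow.
MAX_BACKLOG, the burst length, and the drain rate are invented for the demo.

/*
 * Illustrative user-space model of the two overflow policies -- NOT kernel
 * code.  "throttle" is the pre-patch behaviour: once the backlog exceeds
 * the limit, a latch is set and every further packet is dropped until the
 * queue drains empty.  "drop-tail" is the post-patch behaviour: only the
 * packet that would overflow the queue is dropped.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_BACKLOG 300			/* stand-in for netdev_max_backlog */

static int run(bool throttle_policy)
{
	int qlen = 0, dropped = 0, throttled = 0;

	for (int i = 0; i < 1000; i++) {	/* 1000 packets arrive */
		if (throttle_policy && throttled && qlen > 0) {
			dropped++;		/* latched: drop until queue empties */
		} else if (qlen <= MAX_BACKLOG) {
			qlen++;			/* room (or queue just emptied): enqueue */
			throttled = 0;
		} else {
			throttled = 1;		/* ignored by the drop-tail policy */
			dropped++;		/* queue full: drop this one packet */
		}
		if ((i & 1) && qlen > 0)	/* softirq drains only 1 of every 2 arrivals */
			qlen--;
	}
	return dropped;
}

int main(void)
{
	printf("throttle policy:  %d of 1000 packets dropped\n", run(true));
	printf("drop-tail policy: %d of 1000 packets dropped\n", run(false));
	return 0;
}

Under this overload pattern the throttle latch discards essentially every
packet after the first overflow, while drop-tail keeps delivering whatever
the drain rate can absorb.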
4 changes: 1 addition & 3 deletions include/linux/netdevice.h
@@ -164,7 +164,6 @@ struct netif_rx_stats
 	unsigned total;
 	unsigned dropped;
 	unsigned time_squeeze;
-	unsigned throttled;
 	unsigned cpu_collision;
 };

@@ -557,10 +556,9 @@ static inline int unregister_gifconf(unsigned int family)
 
 struct softnet_data
 {
-	int			throttle;
+	struct net_device	*output_queue;
 	struct sk_buff_head	input_pkt_queue;
 	struct list_head	poll_list;
-	struct net_device	*output_queue;
 	struct sk_buff		*completion_queue;
 
 	struct net_device	backlog_dev;	/* Sorry. 8) */
21 changes: 2 additions & 19 deletions net/core/dev.c
Expand Up @@ -198,7 +198,7 @@ static struct notifier_block *netdev_chain;
* Device drivers call our routines to queue packets here. We empty the
* queue in the local softnet handler.
*/
DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, };
DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL };

#ifdef CONFIG_SYSFS
extern int netdev_sysfs_init(void);
@@ -1372,7 +1372,6 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
 
 int netif_rx(struct sk_buff *skb)
 {
-	int this_cpu;
 	struct softnet_data *queue;
 	unsigned long flags;

@@ -1388,35 +1387,22 @@ int netif_rx(struct sk_buff *skb)
 	 * short when CPU is congested, but is still operating.
 	 */
 	local_irq_save(flags);
-	this_cpu = smp_processor_id();
 	queue = &__get_cpu_var(softnet_data);
 
 	__get_cpu_var(netdev_rx_stat).total++;
 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
 		if (queue->input_pkt_queue.qlen) {
-			if (queue->throttle)
-				goto drop;
-
 enqueue:
 			dev_hold(skb->dev);
 			__skb_queue_tail(&queue->input_pkt_queue, skb);
 			local_irq_restore(flags);
 			return NET_RX_SUCCESS;
 		}
 
-		if (queue->throttle)
-			queue->throttle = 0;
-
 		netif_rx_schedule(&queue->backlog_dev);
 		goto enqueue;
 	}
 
-	if (!queue->throttle) {
-		queue->throttle = 1;
-		__get_cpu_var(netdev_rx_stat).throttled++;
-	}
-
-drop:
 	__get_cpu_var(netdev_rx_stat).dropped++;
 	local_irq_restore(flags);

@@ -1701,8 +1687,6 @@ static int process_backlog(struct net_device *backlog_dev, int *budget)
 	smp_mb__before_clear_bit();
 	netif_poll_enable(backlog_dev);
 
-	if (queue->throttle)
-		queue->throttle = 0;
 	local_irq_enable();
 	return 0;
 }
@@ -1976,7 +1960,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
 	struct netif_rx_stats *s = v;
 
 	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
-		   s->total, s->dropped, s->time_squeeze, s->throttled,
+		   s->total, s->dropped, s->time_squeeze, 0,
 		   0, 0, 0, 0, /* was fastroute */
 		   s->cpu_collision );
 	return 0;
@@ -3220,7 +3204,6 @@ static int __init net_dev_init(void)
 
 		queue = &per_cpu(softnet_data, i);
 		skb_queue_head_init(&queue->input_pkt_queue);
-		queue->throttle = 0;
 		queue->completion_queue = NULL;
 		INIT_LIST_HEAD(&queue->poll_list);
 		set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
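
For reference, the columns emitted by the softnet_seq_show() hunk above appear
in /proc/net/softnet_stat as nine zero-padded hex values per online CPU; after
this commit the fourth column, which used to report "throttled", always reads
zero. The following is a minimal user-space sketch that parses that layout;
the assumption that exactly nine columns are present is specific to this
kernel version and is not a stable ABI statement.

/*
 * Minimal reader for /proc/net/softnet_stat as printed by the
 * softnet_seq_show() hunk above: nine hex columns per online CPU --
 * total, dropped, time_squeeze, then five always-zero slots (the fourth
 * column used to be "throttled" before this commit), then cpu_collision.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/net/softnet_stat", "r");
	unsigned int col[9];
	int cpu = 0;

	if (!f) {
		perror("open /proc/net/softnet_stat");
		return 1;
	}
	while (fscanf(f, "%x %x %x %x %x %x %x %x %x",
		      &col[0], &col[1], &col[2], &col[3], &col[4],
		      &col[5], &col[6], &col[7], &col[8]) == 9) {
		printf("cpu%d: total=%u dropped=%u time_squeeze=%u cpu_collision=%u\n",
		       cpu++, col[0], col[1], col[2], col[8]);
	}
	fclose(f);
	return 0;
}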