From ea0ec72f7650d3a3d32565ad4fa8c52541d2dba2 Mon Sep 17 00:00:00 2001
From: Changli Gao
Date: Tue, 30 Mar 2010 20:16:22 +0000
Subject: [PATCH]

--- yaml ---
r: 193911
b: refs/heads/master
c: 152102c7f2bf191690f1069bae292ea3925adf14
h: refs/heads/master
i:
  193909: 111a974ecc03d69ccca2fb40e8dc7676b5bfb63f
  193907: 2648ac917695add5f010df75ed65a1190b8dda2f
  193903: ed2582a1b0433a021c6695233381045bbbd3cd51
v: v3
---
 [refs]               |  2 +-
 trunk/net/core/dev.c | 42 ++++++++++++++++++++++++++++--------------
 2 files changed, 29 insertions(+), 15 deletions(-)

diff --git a/[refs] b/[refs]
index 531b0387f731..91d24f86aa34 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 630b943c182d1aed69f244405131902fbcba7ec6
+refs/heads/master: 152102c7f2bf191690f1069bae292ea3925adf14
diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c
index 887aa84fcd46..427cd53c118d 100644
--- a/trunk/net/core/dev.c
+++ b/trunk/net/core/dev.c
@@ -206,6 +206,20 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
 }
 
+static inline void rps_lock(struct softnet_data *queue)
+{
+#ifdef CONFIG_RPS
+	spin_lock(&queue->input_pkt_queue.lock);
+#endif
+}
+
+static inline void rps_unlock(struct softnet_data *queue)
+{
+#ifdef CONFIG_RPS
+	spin_unlock(&queue->input_pkt_queue.lock);
+#endif
+}
+
 /* Device list insertion */
 static int list_netdevice(struct net_device *dev)
 {
@@ -2313,13 +2327,13 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu)
 	local_irq_save(flags);
 	__get_cpu_var(netdev_rx_stat).total++;
 
-	spin_lock(&queue->input_pkt_queue.lock);
+	rps_lock(queue);
 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
 		if (queue->input_pkt_queue.qlen) {
 enqueue:
 			__skb_queue_tail(&queue->input_pkt_queue, skb);
-			spin_unlock_irqrestore(&queue->input_pkt_queue.lock,
-			    flags);
+			rps_unlock(queue);
+			local_irq_restore(flags);
 			return NET_RX_SUCCESS;
 		}
 
@@ -2341,7 +2355,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu)
 		goto enqueue;
 	}
 
-	spin_unlock(&queue->input_pkt_queue.lock);
+	rps_unlock(queue);
 
 	__get_cpu_var(netdev_rx_stat).dropped++;
 	local_irq_restore(flags);
@@ -2766,19 +2780,19 @@ int netif_receive_skb(struct sk_buff *skb)
 EXPORT_SYMBOL(netif_receive_skb);
 
 /* Network device is going away, flush any packets still pending */
-static void flush_backlog(struct net_device *dev, int cpu)
+static void flush_backlog(void *arg)
 {
-	struct softnet_data *queue = &per_cpu(softnet_data, cpu);
+	struct net_device *dev = arg;
+	struct softnet_data *queue = &__get_cpu_var(softnet_data);
 	struct sk_buff *skb, *tmp;
-	unsigned long flags;
 
-	spin_lock_irqsave(&queue->input_pkt_queue.lock, flags);
+	rps_lock(queue);
 	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
 		if (skb->dev == dev) {
 			__skb_unlink(skb, &queue->input_pkt_queue);
 			kfree_skb(skb);
 		}
-	spin_unlock_irqrestore(&queue->input_pkt_queue.lock, flags);
+	rps_unlock(queue);
 }
 
 static int napi_gro_complete(struct sk_buff *skb)
@@ -3091,14 +3105,16 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	do {
 		struct sk_buff *skb;
 
-		spin_lock_irq(&queue->input_pkt_queue.lock);
+		local_irq_disable();
+		rps_lock(queue);
 		skb = __skb_dequeue(&queue->input_pkt_queue);
 		if (!skb) {
 			__napi_complete(napi);
 			spin_unlock_irq(&queue->input_pkt_queue.lock);
 			break;
 		}
-		spin_unlock_irq(&queue->input_pkt_queue.lock);
+		rps_unlock(queue);
+		local_irq_enable();
 
 		__netif_receive_skb(skb);
 	} while (++work < quota && jiffies == start_time);
@@ -5548,7 +5564,6 @@ void netdev_run_todo(void)
 	while (!list_empty(&list)) {
 		struct net_device *dev
 			= list_first_entry(&list, struct net_device, todo_list);
-		int i;
 
 		list_del(&dev->todo_list);
 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
@@ -5560,8 +5575,7 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
-		for_each_online_cpu(i)
-			flush_backlog(dev, i);
+		on_each_cpu(flush_backlog, dev, 1);
 
 		netdev_wait_allrefs(dev);
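
Every hunk above applies one idea: the per-CPU input_pkt_queue only needs its
spinlock when CONFIG_RPS is set (only then can another CPU touch the queue),
so the lock is wrapped in rps_lock()/rps_unlock() helpers that compile to
nothing otherwise, while interrupt masking stays as separate, explicit
local_irq_*() calls. The standalone userspace C sketch below illustrates just
that conditional-locking pattern; ENABLE_RPS, struct backlog, queue_lock()
and queue_unlock() are hypothetical stand-ins, not kernel APIs.

    /* build: cc -pthread rps_sketch.c
     * add -DENABLE_RPS to compile the lock in, mirroring CONFIG_RPS */
    #include <pthread.h>
    #include <stdio.h>

    struct backlog {
    #ifdef ENABLE_RPS
    	pthread_mutex_t lock;	/* needed only if other threads enqueue here */
    #endif
    	int qlen;
    };

    /* No-op unless ENABLE_RPS: without cross-thread enqueue the queue is
     * effectively thread-local and taking a lock would be pure overhead. */
    static inline void queue_lock(struct backlog *q)
    {
    #ifdef ENABLE_RPS
    	pthread_mutex_lock(&q->lock);
    #endif
    }

    static inline void queue_unlock(struct backlog *q)
    {
    #ifdef ENABLE_RPS
    	pthread_mutex_unlock(&q->lock);
    #endif
    }

    int main(void)
    {
    	struct backlog q = {
    #ifdef ENABLE_RPS
    		.lock = PTHREAD_MUTEX_INITIALIZER,
    #endif
    		.qlen = 0,
    	};

    	queue_lock(&q);		/* compiles away when ENABLE_RPS is unset */
    	q.qlen++;
    	queue_unlock(&q);

    	printf("qlen = %d\n", q.qlen);
    	return 0;
    }

The flush_backlog() rework follows from the same split: with the lock
compiled out there is nothing a remote CPU could take to safely flush another
CPU's queue, so instead of walking for_each_online_cpu() and locking each
queue from afar, the patch runs flush_backlog() on every CPU via
on_each_cpu(), letting each CPU drain its own softnet_data locally.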