---
r: 194279
b: refs/heads/master
c: 3eb14b9
h: refs/heads/master
i:
  194277: a9f72d3
  194275: 3fc345c
  194271: 4fe50bb
v: v3
David S. Miller committed Apr 15, 2010
1 parent 48cc589 commit 399f210
Showing 5 changed files with 26 additions and 15 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5c01d5669356e13f0fb468944c1dd4c6a7e978ad
+refs/heads/master: 3eb14b944f2b5b6efe4e0ae3fe9601db78437d57
2 changes: 1 addition & 1 deletion trunk/include/linux/netdevice.h
@@ -1331,7 +1331,7 @@ struct softnet_data {
 	struct sk_buff		*completion_queue;
 
 	/* Elements below can be accessed between CPUs for RPS */
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RPS
 	struct call_single_data	csd ____cacheline_aligned_in_smp;
 #endif
 	struct sk_buff_head	input_pkt_queue;
25 changes: 15 additions & 10 deletions trunk/net/core/dev.c
@@ -2206,6 +2206,7 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
 /*
  * get_rps_cpu is called from netif_receive_skb and returns the target
  * CPU from the RPS map of the receiving queue for a given skb.
+ * rcu_read_lock must be held on entry.
  */
 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
 {
@@ -2217,8 +2218,6 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
 	u8 ip_proto;
 	u32 addr1, addr2, ports, ihl;
 
-	rcu_read_lock();
-
 	if (skb_rx_queue_recorded(skb)) {
 		u16 index = skb_get_rx_queue(skb);
 		if (unlikely(index >= dev->num_rx_queues)) {
@@ -2296,7 +2295,6 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
 	}
 
 done:
-	rcu_read_unlock();
 	return cpu;
 }
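Note on the three hunks above: get_rps_cpu() dereferences RCU-protected state, so after this change the caller owns the read-side critical section (see the netif_rx() hunks below, which now bracket both the lookup and the enqueue). A rough sketch of the dereference inside get_rps_cpu(), assuming this era's struct netdev_rx_queue layout; the CPU selection logic is simplified:

    /* Sketch only -- not part of this diff.  The caller's rcu_read_lock()
     * keeps 'map' alive across this lookup and the later enqueue. */
    struct netdev_rx_queue *rxqueue = dev->_rx + skb_get_rx_queue(skb);
    struct rps_map *map = rcu_dereference(rxqueue->rps_map);

    if (map)
        cpu = map->cpus[hash % map->len];   /* simplified CPU pick */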

@@ -2392,7 +2390,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu)
 
 int netif_rx(struct sk_buff *skb)
 {
-	int cpu;
+	int ret;
 
 	/* if netpoll wants it, pretend we never saw it */
 	if (netpoll_rx(skb))
@@ -2402,14 +2400,21 @@ int netif_rx(struct sk_buff *skb)
 		net_timestamp(skb);
 
 #ifdef CONFIG_RPS
-	cpu = get_rps_cpu(skb->dev, skb);
-	if (cpu < 0)
-		cpu = smp_processor_id();
+	{
+		int cpu;
+
+		rcu_read_lock();
+		cpu = get_rps_cpu(skb->dev, skb);
+		if (cpu < 0)
+			cpu = smp_processor_id();
+		ret = enqueue_to_backlog(skb, cpu);
+		rcu_read_unlock();
+	}
 #else
-	cpu = smp_processor_id();
+	ret = enqueue_to_backlog(skb, get_cpu());
+	put_cpu();
 #endif
 
-	return enqueue_to_backlog(skb, cpu);
+	return ret;
 }
 EXPORT_SYMBOL(netif_rx);

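Note on the netif_rx() hunk: the non-RPS path switches from smp_processor_id() to the get_cpu()/put_cpu() pair. A minimal sketch of the distinction, relying only on the standard definitions (get_cpu() disables preemption before returning the CPU id; put_cpu() re-enables it):

    /* Sketch only.  With bare smp_processor_id() the task could be
     * preempted and migrated between reading the id and enqueueing,
     * leaving a stale id (and a warning under CONFIG_DEBUG_PREEMPT).
     * get_cpu() pins the task for the duration of the enqueue. */
    int cpu = get_cpu();        /* preempt_disable() + smp_processor_id() */
    ret = enqueue_to_backlog(skb, cpu);
    put_cpu();                  /* preempt_enable() */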
2 changes: 1 addition & 1 deletion trunk/net/ipv4/Kconfig
@@ -252,7 +252,7 @@ config IP_MROUTE
 
 config IP_MROUTE_MULTIPLE_TABLES
 	bool "IP: multicast policy routing"
-	depends on IP_ADVANCED_ROUTER
+	depends on IP_MROUTE && IP_ADVANCED_ROUTER
 	select FIB_RULES
 	help
 	  Normally, a multicast router runs a userspace daemon and decides
10 changes: 8 additions & 2 deletions trunk/net/ipv4/ipmr.c
@@ -71,6 +71,9 @@
 
 struct mr_table {
 	struct list_head	list;
+#ifdef CONFIG_NET_NS
+	struct net		*net;
+#endif
 	u32			id;
 	struct sock		*mroute_sk;
 	struct timer_list	ipmr_expire_timer;
@@ -308,6 +311,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
 	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
 	if (mrt == NULL)
 		return NULL;
+	write_pnet(&mrt->net, net);
 	mrt->id = id;
 
 	/* Forwarding cache */
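Note on write_pnet(): the helpers this change uses compile away when namespaces are off, which is what lets the new struct mr_table field sit under #ifdef CONFIG_NET_NS. Roughly as they appear in include/net/net_namespace.h of this era (a sketch from memory; verify against the tree):

    static inline void write_pnet(struct net **pnet, struct net *net)
    {
    #ifdef CONFIG_NET_NS
        *pnet = net;            /* remember the owning namespace */
    #endif
    }

    static inline struct net *read_pnet(struct net * const *pnet)
    {
    #ifdef CONFIG_NET_NS
        return *pnet;
    #else
        return &init_net;       /* single-namespace kernels */
    #endif
    }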
@@ -580,7 +584,7 @@ static inline void ipmr_cache_free(struct mfc_cache *c)
 
 static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
 {
-	struct net *net = NULL; //mrt->net;
+	struct net *net = read_pnet(&mrt->net);
 	struct sk_buff *skb;
 	struct nlmsgerr *e;
 
@@ -1089,20 +1093,22 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
 	 *	Check to see if we resolved a queued list. If so we
 	 *	need to send on the frames and tidy up.
 	 */
+	found = false;
 	spin_lock_bh(&mfc_unres_lock);
 	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
 		if (uc->mfc_origin == c->mfc_origin &&
 		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
 			list_del(&uc->list);
 			atomic_dec(&mrt->cache_resolve_queue_len);
+			found = true;
 			break;
 		}
 	}
 	if (list_empty(&mrt->mfc_unres_queue))
 		del_timer(&mrt->ipmr_expire_timer);
 	spin_unlock_bh(&mfc_unres_lock);
 
-	if (uc) {
+	if (found) {
 		ipmr_cache_resolve(net, mrt, uc, c);
 		ipmr_cache_free(uc);
 	}
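Note on the found flag: the old "if (uc)" test could never be false, because a list_for_each_entry() cursor is not NULL after an exhausted loop -- it holds container_of() of the list head itself. A hypothetical sketch of the pitfall and the idiomatic fix (match() and handle() are stand-ins, not functions from this file):

    /* Sketch only.  After a full, break-less traversal, 'uc' points at
     * container_of(&mrt->mfc_unres_queue, struct mfc_cache, list) -- a
     * bogus non-NULL address inside struct mr_table -- so test a flag,
     * never the cursor. */
    bool found = false;
    struct mfc_cache *uc;

    list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
        if (match(uc)) {        /* stand-in for the origin/group compare */
            found = true;
            break;
        }
    }
    if (found)
        handle(uc);             /* 'uc' is valid only when found is true */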
