diff --git a/[refs] b/[refs]
index e597e6f1faa0..74d0b78d14cf 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 37cc6780170f6f4fc3f9746d6a9bb2da1d999d7b
+refs/heads/master: bc0fde2fad007a81ecffceb25a893a6c3f1ed767
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 45b26e78bbcd..c67a402a6857 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -3071,7 +3071,6 @@ M:	horms@verge.net.au
 P:	Julian Anastasov
 M:	ja@ssi.bg
 L:	netdev@vger.kernel.org
-L:	lvs-devel@vger.kernel.org
 S:	Maintained
 
 NFS CLIENT
diff --git a/trunk/net/ipv4/ipvs/ip_vs_sync.c b/trunk/net/ipv4/ipvs/ip_vs_sync.c
index 45e9bd96c286..a652da2c3200 100644
--- a/trunk/net/ipv4/ipvs/ip_vs_sync.c
+++ b/trunk/net/ipv4/ipvs/ip_vs_sync.c
@@ -904,9 +904,9 @@ int stop_sync_thread(int state)
		 * progress of stopping the master sync daemon.
		 */
-		spin_lock(&ip_vs_sync_lock);
+		spin_lock_bh(&ip_vs_sync_lock);
		ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
-		spin_unlock(&ip_vs_sync_lock);
+		spin_unlock_bh(&ip_vs_sync_lock);
		kthread_stop(sync_master_thread);
		sync_master_thread = NULL;
	} else if (state == IP_VS_STATE_BACKUP) {
diff --git a/trunk/net/ipv4/udp.c b/trunk/net/ipv4/udp.c
index 8e42fbbd5761..383d17359d01 100644
--- a/trunk/net/ipv4/udp.c
+++ b/trunk/net/ipv4/udp.c
@@ -989,9 +989,7 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
		    up->encap_rcv != NULL) {
			int ret;
 
-			bh_unlock_sock(sk);
			ret = (*up->encap_rcv)(sk, skb);
-			bh_lock_sock(sk);
			if (ret <= 0) {
				UDP_INC_STATS_BH(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
@@ -1094,7 +1092,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		if (skb1) {
			int ret = 0;
 
-			bh_lock_sock(sk);
+			bh_lock_sock_nested(sk);
			if (!sock_owned_by_user(sk))
				ret = udp_queue_rcv_skb(sk, skb1);
			else
@@ -1196,7 +1194,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 
	if (sk != NULL) {
		int ret = 0;
-		bh_lock_sock(sk);
+		bh_lock_sock_nested(sk);
		if (!sock_owned_by_user(sk))
			ret = udp_queue_rcv_skb(sk, skb);
		else
diff --git a/trunk/net/ipv6/udp.c b/trunk/net/ipv6/udp.c
index a6aecf76a71b..d1477b350f76 100644
--- a/trunk/net/ipv6/udp.c
+++ b/trunk/net/ipv6/udp.c
@@ -379,7 +379,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				     uh->source, saddr, dif))) {
			struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
			if (buff) {
-				bh_lock_sock(sk2);
+				bh_lock_sock_nested(sk2);
				if (!sock_owned_by_user(sk2))
					udpv6_queue_rcv_skb(sk2, buff);
				else
@@ -387,7 +387,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
			bh_unlock_sock(sk2);
		}
	}
-	bh_lock_sock(sk);
+	bh_lock_sock_nested(sk);
	if (!sock_owned_by_user(sk))
		udpv6_queue_rcv_skb(sk, skb);
	else
@@ -508,7 +508,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 
	/* deliver */
 
-	bh_lock_sock(sk);
+	bh_lock_sock_nested(sk);
	if (!sock_owned_by_user(sk))
		udpv6_queue_rcv_skb(sk, skb);
	else
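
For context, the UDP hunks above all converge on one pattern: the receive paths
now keep the socket lock held across the encap_rcv() callback (the
bh_unlock_sock()/bh_lock_sock() pair in udp_queue_rcv_skb() is removed) and take
that lock with the nested lockdep annotation, apparently so that lockdep does not
flag the expected one-level nesting when delivery reaches a second UDP socket.
The ip_vs_sync.c hunk is independent: it switches to the _bh spinlock variants,
presumably because ip_vs_sync_lock is also taken from softirq context. The sketch
below is purely illustrative and is not part of the patch; deliver_or_backlog()
is a hypothetical helper, while the calls inside it are the kernel APIs visible
in the diff.

/*
 * Illustrative sketch only -- not from the patch.  It shows the locking
 * pattern the patched IPv4/IPv6 UDP receive paths share.
 */
#include <net/sock.h>
#include <net/udp.h>

static void deliver_or_backlog(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * Nested lock class (assumption about intent): with the lock no
	 * longer dropped around encap_rcv(), that callback may run with
	 * this socket locked and end up locking another UDP socket, so
	 * the acquisition is annotated for lockdep.
	 */
	bh_lock_sock_nested(sk);
	if (!sock_owned_by_user(sk))
		udp_queue_rcv_skb(sk, skb);	/* process in softirq context */
	else
		sk_add_backlog(sk, skb);	/* owner drains it on release */
	bh_unlock_sock(sk);
}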