Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 199198
b: refs/heads/master
c: 8a74ad6
h: refs/heads/master
v: v3
  • Loading branch information
Eric Dumazet authored and David S. Miller committed May 27, 2010
1 parent 05a90f2 commit f7461a4
Show file tree
Hide file tree
Showing 6 changed files with 63 additions and 17 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: a56635a56f2afb3d22d9ce07e8f8d69537416b2d
refs/heads/master: 8a74ad60a546b13bd1096b2a61a7a5c6fd9ae17c
20 changes: 14 additions & 6 deletions trunk/include/net/sock.h
Original file line number Diff line number Diff line change
Expand Up @@ -1026,15 +1026,23 @@ extern void release_sock(struct sock *sk);
SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))

extern bool lock_sock_fast(struct sock *sk);
/**
 * unlock_sock_fast - complement of lock_sock_fast
 * @sk: socket
 * @slow: slow mode flag, as returned by lock_sock_fast()
 *
 * Fast unlock of a socket locked with lock_sock_fast(), for user context.
 * If @slow is true the slow path was taken (socket fully owned), so the
 * regular release_sock() must run; otherwise only the spinlock/BH state
 * taken by the fast path has to be undone.
 */
static inline void unlock_sock_fast(struct sock *sk, bool slow)
{
	if (slow)
		release_sock(sk);
	else
		spin_unlock_bh(&sk->sk_lock.slock);
}

static inline void unlock_sock_bh(struct sock *sk)
{
spin_unlock_bh(&sk->sk_lock.slock);
}

extern struct sock *sk_alloc(struct net *net, int family,
gfp_t priority,
Expand Down
6 changes: 4 additions & 2 deletions trunk/net/core/datagram.c
Original file line number Diff line number Diff line change
Expand Up @@ -229,15 +229,17 @@ EXPORT_SYMBOL(skb_free_datagram);

/*
 * skb_free_datagram_locked - drop a reference on @skb, freeing it (and
 * reclaiming socket memory under the socket lock) if it was the last one.
 */
void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
	bool slow;

	/*
	 * Refcount fast path: if we hold the only reference, skip the
	 * atomic decrement; the smp_rmb() pairs the read of skb->users
	 * with the subsequent accesses to the skb's fields.  Otherwise
	 * decrement, and bail out unless we dropped the last reference.
	 */
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;

	/* Small critical section: orphan + partial memory reclaim only. */
	slow = lock_sock_fast(sk);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	__kfree_skb(skb);
}
Expand Down
33 changes: 33 additions & 0 deletions trunk/net/core/sock.c
Original file line number Diff line number Diff line change
Expand Up @@ -2007,6 +2007,39 @@ void release_sock(struct sock *sk)
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 *
 * Returns false if the fast path was taken:
 *   sk_lock.slock locked, owned = 0, BH disabled.
 * Returns true if the slow path was taken:
 *   sk_lock.slock unlocked, owned = 1, BH enabled.
 *
 * The return value must be passed to unlock_sock_fast() so it can undo
 * the matching state.
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Fast path: no user-context owner.  Return with the
		 * spinlock held and BH still disabled; the caller's
		 * unlock_sock_fast(sk, false) undoes both.
		 */
		return false;

	/* Slow path: wait for the current owner, then take ownership. */
	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
EXPORT_SYMBOL(lock_sock_fast);

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
struct timeval tv;
Expand Down
14 changes: 8 additions & 6 deletions trunk/net/ipv4/udp.c
Original file line number Diff line number Diff line change
Expand Up @@ -1063,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk)
spin_unlock_bh(&rcvq->lock);

if (!skb_queue_empty(&list_kill)) {
lock_sock_bh(sk);
bool slow = lock_sock_fast(sk);

__skb_queue_purge(&list_kill);
sk_mem_reclaim_partial(sk);
unlock_sock_bh(sk);
unlock_sock_fast(sk, slow);
}
return res;
}
Expand Down Expand Up @@ -1123,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);
bool slow;

/*
* Check any passed addresses
Expand Down Expand Up @@ -1197,10 +1199,10 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
return err;

csum_copy_err:
lock_sock_bh(sk);
slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags))
UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
unlock_sock_bh(sk);
unlock_sock_fast(sk, slow);

if (noblock)
return -EAGAIN;
Expand Down Expand Up @@ -1625,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb)

/* Flush any pending frames while tearing the UDP socket down. */
void udp_destroy_sock(struct sock *sk)
{
	bool slow;

	slow = lock_sock_fast(sk);
	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
}

/*
Expand Down
5 changes: 3 additions & 2 deletions trunk/net/ipv6/udp.c
Original file line number Diff line number Diff line change
Expand Up @@ -328,6 +328,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
int err;
int is_udplite = IS_UDPLITE(sk);
int is_udp4;
bool slow;

if (addr_len)
*addr_len=sizeof(struct sockaddr_in6);
Expand Down Expand Up @@ -424,7 +425,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
return err;

csum_copy_err:
lock_sock_bh(sk);
slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags)) {
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
Expand All @@ -433,7 +434,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
unlock_sock_bh(sk);
unlock_sock_fast(sk, slow);

if (flags & MSG_DONTWAIT)
return -EAGAIN;
Expand Down

0 comments on commit f7461a4

Please sign in to comment.