Commit d61d5ef

---
r: 194559
b: refs/heads/master
c: c377411
h: refs/heads/master
i:
  194557: cf5a34d
  194555: 6329406
  194551: 8823880
  194543: 20c24fb
  194527: 707fe74
  194495: 48db416
  194431: bfcaf50
  194303: 74a32d6
  194047: 012cb60
  193535: e2bc907
  192511: 9325e2b
v: v3
Eric Dumazet authored and David S. Miller committed Apr 27, 2010
1 parent 7c0cb3a commit d61d5ef
Showing 6 changed files with 28 additions and 7 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 6e7676c1a76aed6e957611d8d7a9e5592e23aeba
+refs/heads/master: c377411f2494a931ff7facdbb3a6839b1266bcf6
13 changes: 11 additions & 2 deletions trunk/include/net/sock.h
@@ -256,7 +256,6 @@ struct sock {
                 struct sk_buff *head;
                 struct sk_buff *tail;
                 int len;
-                int limit;
         } sk_backlog;
         wait_queue_head_t *sk_sleep;
         struct dst_entry *sk_dst_cache;
@@ -608,10 +607,20 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
         skb->next = NULL;
 }
 
+/*
+ * Take into account size of receive queue and backlog queue
+ */
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+{
+        unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
+
+        return qsize + skb->truesize > sk->sk_rcvbuf;
+}
+
 /* The per-socket spinlock must be held here. */
 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
-        if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
+        if (sk_rcvqueues_full(sk, skb))
                 return -ENOBUFS;
 
         __sk_add_backlog(sk, skb);
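The new helper replaces a backlog-only limit with a combined one: bytes already charged to the receive queue (sk_rmem_alloc) plus bytes parked in the backlog (sk_backlog.len) are summed before the incoming skb's truesize is tested against sk_rcvbuf. A minimal user-space sketch of that accounting, with hypothetical simplified types and example values rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the three kernel fields the check reads. */
struct model_sock {
        unsigned int backlog_len; /* sk->sk_backlog.len */
        unsigned int rmem_alloc;  /* atomic_read(&sk->sk_rmem_alloc) */
        unsigned int rcvbuf;      /* sk->sk_rcvbuf */
};

/* Same arithmetic as sk_rcvqueues_full() above. */
static bool rcvqueues_full(const struct model_sock *sk, unsigned int truesize)
{
        unsigned int qsize = sk->backlog_len + sk->rmem_alloc;

        return qsize + truesize > sk->rcvbuf;
}

int main(void)
{
        struct model_sock sk = {
                .backlog_len = 20000,
                .rmem_alloc  = 90000,
                .rcvbuf      = 100000,
        };

        /* prints "full: yes": the combined budget is already spent */
        printf("full: %s\n", rcvqueues_full(&sk, 1500) ? "yes" : "no");
        return 0;
}

With these numbers the old test (sk_backlog.len >= 2 * sk_rcvbuf) would still have accepted the packet; the combined test refuses it because the receive queue alone nearly exhausts the budget.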
5 changes: 4 additions & 1 deletion trunk/net/core/sock.c
@@ -327,6 +327,10 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)

         skb->dev = NULL;
 
+        if (sk_rcvqueues_full(sk, skb)) {
+                atomic_inc(&sk->sk_drops);
+                goto discard_and_relse;
+        }
         if (nested)
                 bh_lock_sock_nested(sk);
         else
@@ -1885,7 +1889,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
         sk->sk_allocation = GFP_KERNEL;
         sk->sk_rcvbuf = sysctl_rmem_default;
         sk->sk_sndbuf = sysctl_wmem_default;
-        sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
         sk->sk_state = TCP_CLOSE;
         sk_set_socket(sk, sock);

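Note the ordering in sk_receive_skb(): the fullness test runs before bh_lock_sock(), so traffic aimed at an already-full socket is shed without ever contending on the per-socket spinlock, and the drop counter is bumped with atomic_inc() precisely because no lock is held yet. A self-contained sketch of that pattern, using hypothetical demo types and a pthread mutex standing in for the socket lock (compile with -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct demo_sock {
        pthread_mutex_t lock;           /* stand-in for the socket lock */
        unsigned int queued, rcvbuf, drops;
};

static int demo_receive(struct demo_sock *sk, unsigned int truesize)
{
        /* Reject before locking: flooding a full socket costs the
         * attacker no lock acquisitions on the victim socket. */
        if (sk->queued + truesize > sk->rcvbuf) {
                __sync_fetch_and_add(&sk->drops, 1); /* like atomic_inc() */
                return -ENOBUFS;
        }
        pthread_mutex_lock(&sk->lock);  /* lock only packets we can queue */
        sk->queued += truesize;
        pthread_mutex_unlock(&sk->lock);
        return 0;
}

int main(void)
{
        struct demo_sock sk = { PTHREAD_MUTEX_INITIALIZER, 99500, 100000, 0 };

        if (demo_receive(&sk, 1500) == -ENOBUFS)
                printf("dropped without locking, drops=%u\n", sk.drops);
        return 0;
}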
4 changes: 4 additions & 0 deletions trunk/net/ipv4/udp.c
@@ -1372,6 +1372,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                 goto drop;
         }
 
+
+        if (sk_rcvqueues_full(sk, skb))
+                goto drop;
+
         rc = 0;
 
         bh_lock_sock(sk);
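For UDP the practical effect is a tighter per-socket worst case. Previously the receive queue was bounded by sk_rcvbuf while the backlog grew independently to sk_backlog.limit (sk_rcvbuf << 1 by default, per the sock_init_data() line removed above); now sk_rcvqueues_full() caps the sum of both queues at sk_rcvbuf. A back-of-envelope sketch, with an assumed example buffer size:

#include <stdio.h>

int main(void)
{
        unsigned int rcvbuf = 131072;   /* assumed example SO_RCVBUF */

        /* Old scheme: the two queues were limited independently. */
        unsigned int old_worst = rcvbuf          /* receive queue */
                               + (rcvbuf << 1);  /* default backlog limit */

        /* New scheme: sk_rcvqueues_full() caps the sum of both queues. */
        unsigned int new_worst = rcvbuf;

        printf("old worst case: %u bytes (~3x rcvbuf)\n", old_worst);
        printf("new worst case: %u bytes\n", new_worst);
        return 0;
}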
8 changes: 8 additions & 0 deletions trunk/net/ipv6/udp.c
@@ -584,6 +584,10 @@ static void flush_stack(struct sock **stack, unsigned int count,

                 sk = stack[i];
                 if (skb1) {
+                        if (sk_rcvqueues_full(sk, skb)) {
+                                kfree_skb(skb1);
+                                goto drop;
+                        }
                         bh_lock_sock(sk);
                         if (!sock_owned_by_user(sk))
                                 udpv6_queue_rcv_skb(sk, skb1);
@@ -759,6 +763,10 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,

         /* deliver */
 
+        if (sk_rcvqueues_full(sk, skb)) {
+                sock_put(sk);
+                goto discard;
+        }
         bh_lock_sock(sk);
         if (!sock_owned_by_user(sk))
                 udpv6_queue_rcv_skb(sk, skb);
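flush_stack() hands each matching multicast socket its own clone of the packet, so a full receiver pays only for its copy: kfree_skb(skb1) releases that clone and delivery continues to the remaining sockets. A rough user-space model of the per-receiver bookkeeping, with hypothetical packet and socket types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt { unsigned int truesize; char *data; };

static struct pkt *pkt_clone(const struct pkt *p)
{
        struct pkt *c = malloc(sizeof(*c));

        c->truesize = p->truesize;
        c->data = strdup(p->data);
        return c;
}

static void pkt_free(struct pkt *p)
{
        free(p->data);
        free(p);
}

struct rcv_sock { unsigned int queued, rcvbuf; };

int main(void)
{
        struct pkt orig = { 1500, "multicast payload" };
        struct rcv_sock socks[2] = { { 3500, 4096 }, { 0, 4096 } };

        for (int i = 0; i < 2; i++) {
                struct pkt *clone = pkt_clone(&orig);

                if (socks[i].queued + clone->truesize > socks[i].rcvbuf) {
                        pkt_free(clone);  /* like kfree_skb(skb1) */
                        printf("sock %d: dropped its clone\n", i);
                        continue;         /* other receivers unaffected */
                }
                socks[i].queued += clone->truesize;
                printf("sock %d: queued\n", i);
        }
        return 0;
}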
3 changes: 0 additions & 3 deletions trunk/net/sctp/socket.c
@@ -3721,9 +3721,6 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
         SCTP_DBG_OBJCNT_INC(sock);
         percpu_counter_inc(&sctp_sockets_allocated);
 
-        /* Set socket backlog limit. */
-        sk->sk_backlog.limit = sysctl_sctp_rmem[1];
-
         local_bh_disable();
         sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
         local_bh_enable();
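SCTP previously sized its backlog from the middle entry of its rmem sysctl triple; that override is removed because the generic combined check now applies. A tiny illustration of which entry the deleted line read, assuming the usual { min, default, max } layout and placeholder values:

#include <stdio.h>

int main(void)
{
        /* rmem-style sysctl triples are { min, default, max }; the removed
         * line read index 1, the default.  Placeholder values only. */
        int sctp_rmem[3] = { 4096, 87380, 4194304 };

        printf("old SCTP backlog limit: %d bytes\n", sctp_rmem[1]);
        return 0;
}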
