---
yaml
---
r: 187936
b: refs/heads/master
c: 8eae939
h: refs/heads/master
v: v3
Zhu Yi authored and David S. Miller committed Mar 5, 2010
1 parent 53c2a20 commit 4a8c993
Showing 3 changed files with 29 additions and 4 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 12c3400a84742f8bb0e4edc822e9ccba58781e0c
+refs/heads/master: 8eae939f1400326b06d0c9afe53d2a484a326871
15 changes: 14 additions & 1 deletion trunk/include/net/sock.h
@@ -253,6 +253,8 @@ struct sock {
struct {
struct sk_buff *head;
struct sk_buff *tail;
int len;
int limit;
} sk_backlog;
wait_queue_head_t *sk_sleep;
struct dst_entry *sk_dst_cache;
@@ -589,7 +591,7 @@ static inline int sk_stream_memory_free(struct sock *sk)
return sk->sk_wmem_queued < sk->sk_sndbuf;
}

-/* The per-socket spinlock must be held here. */
+/* OOB backlog add */
static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
if (!sk->sk_backlog.tail) {
@@ -601,6 +603,17 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
skb->next = NULL;
}

/* The per-socket spinlock must be held here. */
static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
{
if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
return -ENOBUFS;

sk_add_backlog(sk, skb);
sk->sk_backlog.len += skb->truesize;
return 0;
}

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
return sk->sk_backlog_rcv(sk, skb);
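For orientation, here is a minimal sketch of how a protocol receive path might use the new helper; the handler name my_proto_rcv is hypothetical and not part of this commit. It mirrors the sk_receive_skb() change below: with the per-socket spinlock held, the skb is processed directly when the socket is not owned by a user context, otherwise it is queued on the backlog, and queueing fails with -ENOBUFS once the accumulated skb->truesize reaches max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1).

/* Illustrative sketch only: my_proto_rcv is hypothetical, not from this commit. */
static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);                        /* the per-socket spinlock */
	if (!sock_owned_by_user(sk)) {
		rc = sk_backlog_rcv(sk, skb);    /* process immediately */
	} else if (sk_add_backlog_limited(sk, skb)) {
		/* Backlog is over its memory limit: count the drop;
		 * the caller is expected to free the skb. */
		atomic_inc(&sk->sk_drops);
		rc = -ENOBUFS;
	}
	bh_unlock_sock(sk);
	return rc;
}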
16 changes: 14 additions & 2 deletions trunk/net/core/sock.c
@@ -340,8 +340,12 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
rc = sk_backlog_rcv(sk, skb);

mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-} else
-sk_add_backlog(sk, skb);
+} else if (sk_add_backlog_limited(sk, skb)) {
+bh_unlock_sock(sk);
+atomic_inc(&sk->sk_drops);
+goto discard_and_relse;
+}

bh_unlock_sock(sk);
out:
sock_put(sk);
@@ -1139,6 +1143,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
sock_lock_init(newsk);
bh_lock_sock(newsk);
newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
newsk->sk_backlog.len = 0;

atomic_set(&newsk->sk_rmem_alloc, 0);
/*
@@ -1542,6 +1547,12 @@ static void __release_sock(struct sock *sk)

bh_lock_sock(sk);
} while ((skb = sk->sk_backlog.head) != NULL);

/*
* Doing the zeroing here guarantee we can not loop forever
* while a wild producer attempts to flood us.
*/
sk->sk_backlog.len = 0;
}

/**
@@ -1874,6 +1885,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_allocation = GFP_KERNEL;
sk->sk_rcvbuf = sysctl_rmem_default;
sk->sk_sndbuf = sysctl_wmem_default;
sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
sk->sk_state = TCP_CLOSE;
sk_set_socket(sk, sock);

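One usage note on the limit: sock_init_data() seeds sk->sk_backlog.limit to twice the default receive buffer, and sk_add_backlog_limited() compares against max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1), so enlarging the receive buffer (for example via SO_RCVBUF) also enlarges the backlog cap. A protocol that wants a higher floor could raise the limit after initializing the socket; the sketch below is illustrative, and my_proto_init_sock / my_proto_backlog_limit are hypothetical names, not part of this commit.

/* Illustrative sketch only: raise the backlog floor for one protocol's sockets. */
static int my_proto_backlog_limit = 64 * 1024;	/* hypothetical per-protocol tunable */

static void my_proto_init_sock(struct sock *sk)
{
	sock_init_data(NULL, sk);	/* sets sk_backlog.limit to sk_rcvbuf << 1 */
	if (my_proto_backlog_limit > sk->sk_backlog.limit)
		sk->sk_backlog.limit = my_proto_backlog_limit;
}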
