Skip to content

Commit

Permalink
tcp: do not block bh during prequeue processing
Browse files Browse the repository at this point in the history
AFAIK, nothing in current TCP stack absolutely wants BH
being disabled once socket is owned by a thread running in
process context.

As mentioned in my prior patch ("tcp: give prequeue mode some care"),
processing a batch of packets might take time, better not block BH
at all.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
Eric Dumazet authored and David S. Miller committed May 2, 2016
1 parent c10d931 commit fb3477c
Show file tree
Hide file tree
Showing 2 changed files with 2 additions and 32 deletions.
4 changes: 0 additions & 4 deletions net/ipv4/tcp.c
Original file line number Diff line number Diff line change
Expand Up @@ -1449,12 +1449,8 @@ static void tcp_prequeue_process(struct sock *sk)

NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED);

/* RX process wants to run with disabled BHs, though it is not
* necessary */
local_bh_disable();
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
sk_backlog_rcv(sk, skb);
local_bh_enable();

/* Clear memory counter. */
tp->ucopy.memory = 0;
Expand Down
30 changes: 2 additions & 28 deletions net/ipv4/tcp_input.c
Original file line number Diff line number Diff line change
Expand Up @@ -4611,14 +4611,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)

__set_current_state(TASK_RUNNING);

local_bh_enable();
if (!skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk)) {
tp->ucopy.len -= chunk;
tp->copied_seq += chunk;
eaten = (chunk == skb->len);
tcp_rcv_space_adjust(sk);
}
local_bh_disable();
}

if (eaten <= 0) {
Expand Down Expand Up @@ -5134,7 +5132,6 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
int chunk = skb->len - hlen;
int err;

local_bh_enable();
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_msg(skb, hlen, tp->ucopy.msg, chunk);
else
Expand All @@ -5146,32 +5143,9 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
tcp_rcv_space_adjust(sk);
}

local_bh_disable();
return err;
}

/*
 * Run full checksum verification on @skb via __tcp_checksum_complete().
 *
 * When the socket is owned by a user-context thread, bottom halves are
 * temporarily re-enabled around the (potentially costly) checksum walk;
 * otherwise the checksum is computed directly.  Returns the folded
 * checksum result (0 means the checksum verified OK).
 *
 * NOTE(review): the BH re-enable/disable pair assumes the caller holds
 * the socket lock with BHs disabled — confirm against callers.
 */
static __sum16 __tcp_checksum_complete_user(struct sock *sk,
					    struct sk_buff *skb)
{
	__sum16 csum;

	if (!sock_owned_by_user(sk))
		return __tcp_checksum_complete(skb);

	/* Socket owned by process context: allow BHs while we checksum. */
	local_bh_enable();
	csum = __tcp_checksum_complete(skb);
	local_bh_disable();

	return csum;
}

static inline bool tcp_checksum_complete_user(struct sock *sk,
struct sk_buff *skb)
{
return !skb_csum_unnecessary(skb) &&
__tcp_checksum_complete_user(sk, skb);
}

/* Does PAWS and seqno based validation of an incoming segment, flags will
* play significant role here.
*/
Expand Down Expand Up @@ -5386,7 +5360,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
}
}
if (!eaten) {
if (tcp_checksum_complete_user(sk, skb))
if (tcp_checksum_complete(skb))
goto csum_error;

if ((int)skb->truesize > sk->sk_forward_alloc)
Expand Down Expand Up @@ -5430,7 +5404,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
}

slow_path:
if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
if (len < (th->doff << 2) || tcp_checksum_complete(skb))
goto csum_error;

if (!th->ack && !th->rst && !th->syn)
Expand Down

0 comments on commit fb3477c

Please sign in to comment.