tcp: Move code around
This is just a preparation patch, which makes the code needed for TCP repair ready for use.

Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Pavel Emelyanov authored and David S. Miller committed Apr 21, 2012
1 parent 4a17fd5 commit 370816a
Showing 4 changed files with 54 additions and 36 deletions.
3 changes: 3 additions & 0 deletions include/net/tcp.h
@@ -435,6 +435,9 @@ extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
struct request_values *rvp);
extern int tcp_disconnect(struct sock *sk, int flags);

void tcp_connect_init(struct sock *sk);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
void tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen);

/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
2 changes: 1 addition & 1 deletion net/ipv4/tcp.c
@@ -919,7 +919,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int iovlen, flags, err, copied;
int mss_now, size_goal;
int mss_now = 0, size_goal;
bool sg;
long timeo;

81 changes: 48 additions & 33 deletions net/ipv4/tcp_input.c
@@ -5325,6 +5325,14 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
return 0;
}

void tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen)
{
__skb_pull(skb, hdrlen);
__skb_queue_tail(&sk->sk_receive_queue, skb);
skb_set_owner_r(skb, sk);
tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
}

/*
* TCP receive function for the ESTABLISHED state.
*
@@ -5490,10 +5498,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);

/* Bulk data transfer: receiver */
__skb_pull(skb, tcp_header_len);
__skb_queue_tail(&sk->sk_receive_queue, skb);
skb_set_owner_r(skb, sk);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
tcp_queue_rcv(sk, skb, tcp_header_len);
}

tcp_event_data_recv(sk, skb);
@@ -5559,6 +5564,44 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
}
EXPORT_SYMBOL(tcp_rcv_established);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);

tcp_set_state(sk, TCP_ESTABLISHED);

if (skb != NULL)
security_inet_conn_established(sk, skb);

/* Make sure socket is routed, for correct metrics. */
icsk->icsk_af_ops->rebuild_header(sk);

tcp_init_metrics(sk);

tcp_init_congestion_control(sk);

/* Prevent spurious tcp_cwnd_restart() on first data
* packet.
*/
tp->lsndtime = tcp_time_stamp;

tcp_init_buffer_space(sk);

if (sock_flag(sk, SOCK_KEEPOPEN))
inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));

if (!tp->rx_opt.snd_wscale)
__tcp_fast_path_on(tp, tp->snd_wnd);
else
tp->pred_flags = 0;

if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
}
}

static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, unsigned int len)
{
@@ -5691,36 +5734,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
}

smp_mb();
tcp_set_state(sk, TCP_ESTABLISHED);

security_inet_conn_established(sk, skb);

/* Make sure socket is routed, for correct metrics. */
icsk->icsk_af_ops->rebuild_header(sk);

tcp_init_metrics(sk);

tcp_init_congestion_control(sk);

/* Prevent spurious tcp_cwnd_restart() on first data
* packet.
*/
tp->lsndtime = tcp_time_stamp;

tcp_init_buffer_space(sk);

if (sock_flag(sk, SOCK_KEEPOPEN))
inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));

if (!tp->rx_opt.snd_wscale)
__tcp_fast_path_on(tp, tp->snd_wnd);
else
tp->pred_flags = 0;

if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
}
tcp_finish_connect(sk, skb);

if (sk->sk_write_pending ||
icsk->icsk_accept_queue.rskq_defer_accept ||
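The tcp_queue_rcv() helper added above bundles the header strip, receive-queue tail insert, socket owner charge and rcv_nxt advance into a single call. As a rough sketch of the kind of reuse this enables, a caller holding the socket lock could feed already-sequenced payload straight into the receive queue; the name tcp_inject_rcv(), its allocation strategy and error handling below are assumptions for illustration, not part of this patch:

#include <linux/skbuff.h>
#include <net/tcp.h>

/* Illustrative sketch only -- not part of this commit. Caller is assumed
 * to hold the socket lock.
 */
static int tcp_inject_rcv(struct sock *sk, const void *data, size_t size)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	skb = alloc_skb(size, sk->sk_allocation);
	if (skb == NULL)
		return -ENOMEM;

	memcpy(skb_put(skb, size), data, size);

	/* Make the payload look like in-order data at the current rcv_nxt... */
	TCP_SKB_CB(skb)->seq = tp->rcv_nxt;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;

	/* ...and queue it; there is no protocol header to strip, so hdrlen is 0. */
	tcp_queue_rcv(sk, skb, 0);

	return 0;
}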
4 changes: 2 additions & 2 deletions net/ipv4/tcp_output.c
@@ -2561,7 +2561,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
EXPORT_SYMBOL(tcp_make_synack);

/* Do all connect socket setups that can be done AF independent. */
static void tcp_connect_init(struct sock *sk)
void tcp_connect_init(struct sock *sk)
{
const struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -2616,6 +2616,7 @@ static void tcp_connect_init(struct sock *sk)
tp->snd_una = tp->write_seq;
tp->snd_sml = tp->write_seq;
tp->snd_up = tp->write_seq;
tp->snd_nxt = tp->write_seq;
tp->rcv_nxt = 0;
tp->rcv_wup = 0;
tp->copied_seq = 0;
@@ -2641,7 +2642,6 @@ int tcp_connect(struct sock *sk)
/* Reserve space for headers. */
skb_reserve(buff, MAX_TCP_HEADER);

tp->snd_nxt = tp->write_seq;
tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
TCP_ECN_send_syn(sk, buff);

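Taken together with the tp->snd_nxt move above, tcp_connect_init() now leaves the socket with consistent send-side sequence state even when no SYN is ever built, and tcp_finish_connect() accepts a NULL skb. A minimal sketch of the pattern this opens up, assuming a hypothetical caller; the name tcp_repair_connect() and the premise that the socket is locked and should skip the handshake are assumptions, not part of this patch:

#include <net/tcp.h>

/* Illustrative sketch only -- not part of this commit: drive a locked
 * socket straight to ESTABLISHED without transmitting a SYN.
 */
static int tcp_repair_connect(struct sock *sk)
{
	/* AF-independent connect setup; after this patch it also
	 * initializes tp->snd_nxt, so skipping the SYN leaves no stale
	 * sequence state behind.
	 */
	tcp_connect_init(sk);

	/* Move to ESTABLISHED; tcp_finish_connect() tolerates a NULL skb. */
	tcp_finish_connect(sk, NULL);

	return 0;
}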
