Merge branch 'net-smc-fixes-2018-01-26'
Ursula Braun says:

====================
net/smc: fixes 2018-01-26

here are some more smc patches. The first four patches take care of
different aspects of smc socket closing; the fifth patch improves
coding style.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Jan 26, 2018
2 parents a81e4af + a8fbf8e commit 72a231b
Showing 7 changed files with 191 additions and 127 deletions.
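
The theme running through the whole diff is the removal of the delayed sock_put_work timer in favor of plain reference counting: every path that may later touch the socket takes a sock_hold() and releases it with a matching, commented sock_put(), and the last put frees the socket immediately. A rough, self-contained userspace sketch of that lifetime rule (the obj_* helpers are hypothetical stand-ins, not kernel APIs):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;	/* lifetime is governed only by this counter */
};

static struct obj *obj_new(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcnt, 1);	/* creator's reference */
	return o;
}

static void obj_hold(struct obj *o)
{
	atomic_fetch_add(&o->refcnt, 1);
}

static void obj_put(struct obj *o)
{
	/* the last put frees immediately; no delayed-work timer needed */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
		puts("final put: freeing object");
		free(o);
	}
}

int main(void)
{
	struct obj *o = obj_new();

	obj_hold(o);	/* e.g. a "passive closing" reference */
	obj_put(o);	/* peer close arrives: passive-closing put */
	obj_put(o);	/* creator's final put frees the object */
	return 0;
}

The commented puts in the diff below ("passive closing", "final sock_put") mark which hold each put pairs with.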
152 changes: 88 additions & 64 deletions net/smc/af_smc.c
@@ -115,7 +115,6 @@ static int smc_release(struct socket *sock)
 		goto out;
 
 	smc = smc_sk(sk);
-	sock_hold(sk);
 	if (sk->sk_state == SMC_LISTEN)
 		/* smc_close_non_accepted() is called and acquires
 		 * sock lock for child sockets again
@@ -124,10 +123,7 @@ static int smc_release(struct socket *sock)
 	else
 		lock_sock(sk);
 
-	if (smc->use_fallback) {
-		sk->sk_state = SMC_CLOSED;
-		sk->sk_state_change(sk);
-	} else {
+	if (!smc->use_fallback) {
 		rc = smc_close_active(smc);
 		sock_set_flag(sk, SOCK_DEAD);
 		sk->sk_shutdown |= SHUTDOWN_MASK;
@@ -136,20 +132,21 @@ static int smc_release(struct socket *sock)
 		sock_release(smc->clcsock);
 		smc->clcsock = NULL;
 	}
+	if (smc->use_fallback) {
+		sock_put(sk); /* passive closing */
+		sk->sk_state = SMC_CLOSED;
+		sk->sk_state_change(sk);
+	}
 
 	/* detach socket */
 	sock_orphan(sk);
 	sock->sk = NULL;
-	if (smc->use_fallback) {
-		schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
-	} else if (sk->sk_state == SMC_CLOSED) {
+	if (!smc->use_fallback && sk->sk_state == SMC_CLOSED)
 		smc_conn_free(&smc->conn);
-		schedule_delayed_work(&smc->sock_put_work,
-				      SMC_CLOSE_SOCK_PUT_DELAY);
-	}
 	release_sock(sk);
 
-	sock_put(sk);
+	sk->sk_prot->unhash(sk);
+	sock_put(sk); /* final sock_put */
 out:
 	return rc;
 }
@@ -181,7 +178,6 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock)
 	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
 	INIT_LIST_HEAD(&smc->accept_q);
 	spin_lock_init(&smc->accept_q_lock);
-	INIT_DELAYED_WORK(&smc->sock_put_work, smc_close_sock_put_work);
 	sk->sk_prot->hash(sk);
 	sk_refcnt_debug_inc(sk);
 
@@ -399,6 +395,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
 	int rc = 0;
 	u8 ibport;
 
+	sock_hold(&smc->sk); /* sock put in passive closing */
+
 	if (!tcp_sk(smc->clcsock->sk)->syn_smc) {
 		/* peer has not signalled SMC-capability */
 		smc->use_fallback = true;
@@ -542,6 +540,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
 	mutex_unlock(&smc_create_lgr_pending);
 	smc_conn_free(&smc->conn);
 out_err:
+	if (smc->sk.sk_state == SMC_INIT)
+		sock_put(&smc->sk); /* passive closing */
 	return rc;
 }
 
@@ -620,7 +620,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
 		new_sk->sk_state = SMC_CLOSED;
 		sock_set_flag(new_sk, SOCK_DEAD);
 		new_sk->sk_prot->unhash(new_sk);
-		sock_put(new_sk);
+		sock_put(new_sk); /* final */
 		*new_smc = NULL;
 		goto out;
 	}
@@ -637,7 +637,7 @@ static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
 {
 	struct smc_sock *par = smc_sk(parent);
 
-	sock_hold(sk);
+	sock_hold(sk); /* sock_put in smc_accept_unlink () */
 	spin_lock(&par->accept_q_lock);
 	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
 	spin_unlock(&par->accept_q_lock);
@@ -653,7 +653,7 @@ static void smc_accept_unlink(struct sock *sk)
 	list_del_init(&smc_sk(sk)->accept_q);
 	spin_unlock(&par->accept_q_lock);
 	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
-	sock_put(sk);
+	sock_put(sk); /* sock_hold in smc_accept_enqueue */
 }
 
 /* remove a sock from the accept queue to bind it to a new socket created
@@ -670,8 +670,12 @@ struct sock *smc_accept_dequeue(struct sock *parent,
 
 		smc_accept_unlink(new_sk);
 		if (new_sk->sk_state == SMC_CLOSED) {
+			if (isk->clcsock) {
+				sock_release(isk->clcsock);
+				isk->clcsock = NULL;
+			}
 			new_sk->sk_prot->unhash(new_sk);
-			sock_put(new_sk);
+			sock_put(new_sk); /* final */
 			continue;
 		}
 		if (new_sock)
@@ -686,14 +690,11 @@ void smc_close_non_accepted(struct sock *sk)
 {
 	struct smc_sock *smc = smc_sk(sk);
 
-	sock_hold(sk);
 	lock_sock(sk);
 	if (!sk->sk_lingertime)
 		/* wait for peer closing */
 		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
-	if (smc->use_fallback) {
-		sk->sk_state = SMC_CLOSED;
-	} else {
+	if (!smc->use_fallback) {
 		smc_close_active(smc);
 		sock_set_flag(sk, SOCK_DEAD);
 		sk->sk_shutdown |= SHUTDOWN_MASK;
@@ -706,14 +707,15 @@ void smc_close_non_accepted(struct sock *sk)
 		sock_release(tcp);
 	}
 	if (smc->use_fallback) {
-		schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
-	} else if (sk->sk_state == SMC_CLOSED) {
-		smc_conn_free(&smc->conn);
-		schedule_delayed_work(&smc->sock_put_work,
-				      SMC_CLOSE_SOCK_PUT_DELAY);
+		sock_put(sk); /* passive closing */
+		sk->sk_state = SMC_CLOSED;
+	} else {
+		if (sk->sk_state == SMC_CLOSED)
+			smc_conn_free(&smc->conn);
 	}
 	release_sock(sk);
-	sock_put(sk);
+	sk->sk_prot->unhash(sk);
+	sock_put(sk); /* final sock_put */
 }
 
 static int smc_serv_conf_first_link(struct smc_sock *smc)
@@ -937,6 +939,8 @@ static void smc_listen_work(struct work_struct *work)
 	smc_lgr_forget(new_smc->conn.lgr);
 	mutex_unlock(&smc_create_lgr_pending);
 out_err:
+	if (newsmcsk->sk_state == SMC_INIT)
+		sock_put(&new_smc->sk); /* passive closing */
 	newsmcsk->sk_state = SMC_CLOSED;
 	smc_conn_free(&new_smc->conn);
 	goto enqueue; /* queue new sock with sk_err set */
@@ -963,12 +967,22 @@ static void smc_tcp_listen_work(struct work_struct *work)
 		sock_hold(lsk); /* sock_put in smc_listen_work */
 		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
 		smc_copy_sock_settings_to_smc(new_smc);
-		schedule_work(&new_smc->smc_listen_work);
+		sock_hold(&new_smc->sk); /* sock_put in passive closing */
+		if (!schedule_work(&new_smc->smc_listen_work))
+			sock_put(&new_smc->sk);
 	}
 
 out:
+	if (lsmc->clcsock) {
+		sock_release(lsmc->clcsock);
+		lsmc->clcsock = NULL;
+	}
 	release_sock(lsk);
-	lsk->sk_data_ready(lsk); /* no more listening, wake accept */
+	/* no more listening, wake up smc_close_wait_listen_clcsock and
+	 * accept
+	 */
+	lsk->sk_state_change(lsk);
+	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
 }
 
 static int smc_listen(struct socket *sock, int backlog)
@@ -1002,7 +1016,9 @@ static int smc_listen(struct socket *sock, int backlog)
 	sk->sk_ack_backlog = 0;
 	sk->sk_state = SMC_LISTEN;
 	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
-	schedule_work(&smc->tcp_listen_work);
+	sock_hold(sk); /* sock_hold in tcp_listen_worker */
+	if (!schedule_work(&smc->tcp_listen_work))
+		sock_put(sk);
 
 out:
 	release_sock(sk);
@@ -1019,6 +1035,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
 	int rc = 0;
 
 	lsmc = smc_sk(sk);
+	sock_hold(sk); /* sock_put below */
 	lock_sock(sk);
 
 	if (lsmc->sk.sk_state != SMC_LISTEN) {
@@ -1053,6 +1070,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
 
 out:
 	release_sock(sk);
+	sock_put(sk); /* sock_hold above */
 	return rc;
 }
 
@@ -1122,21 +1140,15 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 
 static unsigned int smc_accept_poll(struct sock *parent)
 {
-	struct smc_sock *isk;
-	struct sock *sk;
+	struct smc_sock *isk = smc_sk(parent);
+	int mask = 0;
 
-	lock_sock(parent);
-	list_for_each_entry(isk, &smc_sk(parent)->accept_q, accept_q) {
-		sk = (struct sock *)isk;
+	spin_lock(&isk->accept_q_lock);
+	if (!list_empty(&isk->accept_q))
+		mask = POLLIN | POLLRDNORM;
+	spin_unlock(&isk->accept_q_lock);
 
-		if (sk->sk_state == SMC_ACTIVE) {
-			release_sock(parent);
-			return POLLIN | POLLRDNORM;
-		}
-	}
-	release_sock(parent);
-
-	return 0;
+	return mask;
 }
 
 static unsigned int smc_poll(struct file *file, struct socket *sock,
@@ -1147,9 +1159,15 @@ static unsigned int smc_poll(struct file *file, struct socket *sock,
 	struct smc_sock *smc;
 	int rc;
 
+	if (!sk)
+		return POLLNVAL;
+
 	smc = smc_sk(sock->sk);
+	sock_hold(sk);
+	lock_sock(sk);
 	if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
 		/* delegate to CLC child sock */
+		release_sock(sk);
 		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
 		/* if non-blocking connect finished ... */
 		lock_sock(sk);
@@ -1161,37 +1179,43 @@ static unsigned int smc_poll(struct file *file, struct socket *sock,
 				rc = smc_connect_rdma(smc);
 				if (rc < 0)
 					mask |= POLLERR;
-				else
-					/* success cases including fallback */
-					mask |= POLLOUT | POLLWRNORM;
+				/* success cases including fallback */
+				mask |= POLLOUT | POLLWRNORM;
 			}
 		}
 		release_sock(sk);
 	} else {
-		sock_poll_wait(file, sk_sleep(sk), wait);
-		if (sk->sk_state == SMC_LISTEN)
-			/* woken up by sk_data_ready in smc_listen_work() */
-			mask |= smc_accept_poll(sk);
+		if (sk->sk_state != SMC_CLOSED) {
+			release_sock(sk);
+			sock_poll_wait(file, sk_sleep(sk), wait);
+			lock_sock(sk);
+		}
 		if (sk->sk_err)
 			mask |= POLLERR;
-		if (atomic_read(&smc->conn.sndbuf_space) ||
-		    (sk->sk_shutdown & SEND_SHUTDOWN)) {
-			mask |= POLLOUT | POLLWRNORM;
-		} else {
-			sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
-			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-		}
-		if (atomic_read(&smc->conn.bytes_to_rcv))
-			mask |= POLLIN | POLLRDNORM;
 		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
 		    (sk->sk_state == SMC_CLOSED))
 			mask |= POLLHUP;
-		if (sk->sk_shutdown & RCV_SHUTDOWN)
-			mask |= POLLIN | POLLRDNORM | POLLRDHUP;
-		if (sk->sk_state == SMC_APPCLOSEWAIT1)
-			mask |= POLLIN;
+		if (sk->sk_state == SMC_LISTEN) {
+			/* woken up by sk_data_ready in smc_listen_work() */
+			mask = smc_accept_poll(sk);
+		} else {
+			if (atomic_read(&smc->conn.sndbuf_space) ||
+			    sk->sk_shutdown & SEND_SHUTDOWN) {
+				mask |= POLLOUT | POLLWRNORM;
+			} else {
+				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+			}
+			if (atomic_read(&smc->conn.bytes_to_rcv))
+				mask |= POLLIN | POLLRDNORM;
+			if (sk->sk_shutdown & RCV_SHUTDOWN)
+				mask |= POLLIN | POLLRDNORM | POLLRDHUP;
+			if (sk->sk_state == SMC_APPCLOSEWAIT1)
+				mask |= POLLIN;
+		}
-
 	}
+	release_sock(sk);
+	sock_put(sk);
 
 	return mask;
 }
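
One recurring change above deserves a note: a bare schedule_work() becomes sock_hold() followed by "if (!schedule_work(...)) sock_put(...)". schedule_work() returns false when the work item is already pending; on that path the reference just taken on behalf of the worker must be dropped again, or it would leak. A self-contained userspace sketch of the same logic, with a mocked one-slot work queue (fake_schedule_work is an illustration, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refcnt = 1;		/* creator's reference */
static atomic_bool work_pending;	/* mocked one-slot "workqueue" */

static void put(void)
{
	if (atomic_fetch_sub(&refcnt, 1) == 1)
		puts("final put: object freed");
}

/* Mimics schedule_work(): returns false if the item was already queued. */
static bool fake_schedule_work(void)
{
	return !atomic_exchange(&work_pending, true);
}

static void hold_and_schedule(void)
{
	atomic_fetch_add(&refcnt, 1);	/* reference the worker will own */
	if (!fake_schedule_work())
		put();			/* already queued: drop the extra ref */
}

int main(void)
{
	hold_and_schedule();	/* queues the work; its ref is kept */
	hold_and_schedule();	/* already pending; extra ref dropped again */
	printf("refcnt = %d (creator + pending worker)\n",
	       atomic_load(&refcnt));
	return 0;
}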
5 changes: 2 additions & 3 deletions net/smc/smc.h
@@ -178,7 +178,6 @@ struct smc_sock { /* smc sock container */
 	struct work_struct	smc_listen_work;/* prepare new accept socket */
 	struct list_head	accept_q;	/* sockets to be accepted */
 	spinlock_t		accept_q_lock;	/* protects accept_q */
-	struct delayed_work	sock_put_work;	/* final socket freeing */
 	bool			use_fallback;	/* fallback to tcp */
 	u8			wait_close_tx_prepared : 1;
 						/* shutdown wr or close
@@ -253,12 +252,12 @@ static inline int smc_uncompress_bufsize(u8 compressed)
 static inline bool using_ipsec(struct smc_sock *smc)
 {
 	return (smc->clcsock->sk->sk_policy[0] ||
-		smc->clcsock->sk->sk_policy[1]) ? 1 : 0;
+		smc->clcsock->sk->sk_policy[1]) ? true : false;
 }
 #else
 static inline bool using_ipsec(struct smc_sock *smc)
 {
-	return 0;
+	return false;
 }
 #endif
 
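
The smc.h hunk above is the coding-style patch: a function whose return type is bool should return true/false rather than 1/0. The "? true : false" is arguably still redundant, since C already normalizes any scalar assigned to a _Bool to 0 or 1; a minimal demonstration:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool b = 42;		/* conversion to _Bool yields 1 for any nonzero */

	printf("%d\n", b);	/* prints 1 */
	return 0;
}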
20 changes: 11 additions & 9 deletions net/smc/smc_cdc.c
@@ -212,6 +212,14 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
 		smc->sk.sk_data_ready(&smc->sk);
 	}
 
+	/* piggy backed tx info */
+	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
+	if (diff_cons && smc_tx_prepared_sends(conn)) {
+		smc_tx_sndbuf_nonempty(conn);
+		/* trigger socket release if connection closed */
+		smc_close_wake_tx_prepared(smc);
+	}
+
 	if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
 		smc->sk.sk_err = ECONNRESET;
 		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
@@ -221,15 +229,9 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
 		if (smc->clcsock && smc->clcsock->sk)
 			smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
 		sock_set_flag(&smc->sk, SOCK_DONE);
-		schedule_work(&conn->close_work);
-	}
-
-	/* piggy backed tx info */
-	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
-	if (diff_cons && smc_tx_prepared_sends(conn)) {
-		smc_tx_sndbuf_nonempty(conn);
-		/* trigger socket release if connection closed */
-		smc_close_wake_tx_prepared(smc);
+		sock_hold(&smc->sk); /* sock_put in close_work */
+		if (!schedule_work(&conn->close_work))
+			sock_put(&smc->sk);
 	}
 }

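The smc_cdc.c hunks apply the same ownership rule to close_work: the receive path takes a reference before scheduling the close worker, and either the worker or the failed schedule is responsible for the matching put. A sketch of handing a reference to an asynchronous worker, with a plain pthread standing in for a kernel work item (all names here are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_sock {
	atomic_int refcnt;
};

static void put(struct fake_sock *s)
{
	if (atomic_fetch_sub(&s->refcnt, 1) == 1) {
		puts("final put: freeing socket object");
		free(s);
	}
}

/* The "close worker": does its teardown, then drops the ref it was handed. */
static void *close_worker(void *arg)
{
	struct fake_sock *s = arg;

	puts("worker: tearing down connection state");
	put(s);			/* matches the hold taken before scheduling */
	return NULL;
}

int main(void)
{
	struct fake_sock *s = malloc(sizeof(*s));
	pthread_t t;

	atomic_init(&s->refcnt, 1);		/* creator's reference */
	atomic_fetch_add(&s->refcnt, 1);	/* hold for the worker */
	if (pthread_create(&t, NULL, close_worker, s) != 0)
		put(s);		/* could not schedule: drop the worker's ref */
	else
		pthread_join(t, NULL);
	put(s);			/* creator's final put */
	return 0;
}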
(Diffs for the remaining four changed files are not shown.)
