rxrpc: Kill the client connection bundle concept
Kill off the concept of maintaining a bundle of connections to a particular
target service in order to make more than four call slots available for
that service (there are four call slots per connection).

This will make cleaning up the connection handling code easier and
facilitate removal of the rxrpc_transport struct.  Bundling can be
reintroduced later if necessary.

Signed-off-by: David Howells <dhowells@redhat.com>
David Howells committed Jun 22, 2016
1 parent 5627cc8 commit 999b69f
Showing 8 changed files with 288 additions and 480 deletions.
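
The limit the commit message refers to comes from each client connection carrying a fixed array of four channels, one per active call. The sketch below illustrates that model only; it is not the patched kernel code, and apart from the RXRPC_MAXCALLS constant (which mirrors the kernel's) every name is made up for the example.

#include <stddef.h>

#define RXRPC_MAXCALLS 4	/* call slots (channels) per connection */

struct example_call { int id; };

struct example_conn {
	/* one active call per channel; NULL means the slot is free */
	struct example_call *channels[RXRPC_MAXCALLS];
};

/* Return the index of a free channel, or -1 if all four are in use.
 * With bundles gone, a fifth concurrent call to the same service must
 * wait for a slot to come free instead of spilling onto a sibling
 * connection in a bundle. */
static int example_pick_channel(struct example_conn *conn)
{
	int i;

	for (i = 0; i < RXRPC_MAXCALLS; i++)
		if (!conn->channels[i])
			return i;
	return -1;
}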
11 changes: 1 addition & 10 deletions net/rxrpc/af_rxrpc.c
@@ -276,7 +276,6 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
gfp_t gfp)
{
struct rxrpc_conn_parameters cp;
struct rxrpc_conn_bundle *bundle;
struct rxrpc_transport *trans;
struct rxrpc_call *call;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
@@ -311,15 +310,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
}
cp.peer = trans->peer;

bundle = rxrpc_get_bundle(rx, trans, key, srx->srx_service, gfp);
if (IS_ERR(bundle)) {
call = ERR_CAST(bundle);
goto out;
}

call = rxrpc_new_client_call(rx, &cp, trans, bundle, user_call_ID, gfp);
rxrpc_put_bundle(trans, bundle);
out:
call = rxrpc_new_client_call(rx, &cp, trans, srx, user_call_ID, gfp);
rxrpc_put_transport(trans);
out_notrans:
release_sock(&rx->sk);
57 changes: 20 additions & 37 deletions net/rxrpc/ar-internal.h
@@ -186,7 +186,8 @@ struct rxrpc_local {
struct sk_buff_head accept_queue; /* incoming calls awaiting acceptance */
struct sk_buff_head reject_queue; /* packets awaiting rejection */
struct sk_buff_head event_queue; /* endpoint event packets awaiting processing */
struct mutex conn_lock; /* Client connection creation lock */
struct rb_root client_conns; /* Client connections by socket params */
spinlock_t client_conns_lock; /* Lock for client_conns */
spinlock_t lock; /* access lock */
rwlock_t services_lock; /* lock for services list */
int debug_id; /* debug ID for printks */
@@ -232,34 +233,14 @@ struct rxrpc_peer {
struct rxrpc_transport {
struct rxrpc_local *local; /* local transport endpoint */
struct rxrpc_peer *peer; /* remote transport endpoint */
struct rb_root bundles; /* client connection bundles on this transport */
struct rb_root server_conns; /* server connections on this transport */
struct list_head link; /* link in master session list */
unsigned long put_time; /* time at which to reap */
spinlock_t client_lock; /* client connection allocation lock */
rwlock_t conn_lock; /* lock for active/dead connections */
atomic_t usage;
int debug_id; /* debug ID for printks */
};

/*
* RxRPC client connection bundle
* - matched by { transport, service_id, key }
*/
struct rxrpc_conn_bundle {
struct rb_node node; /* node in transport's lookup tree */
struct list_head unused_conns; /* unused connections in this bundle */
struct list_head avail_conns; /* available connections in this bundle */
struct list_head busy_conns; /* busy connections in this bundle */
struct key *key; /* security for this bundle */
wait_queue_head_t chanwait; /* wait for channel to become available */
atomic_t usage;
int debug_id; /* debug ID for printks */
unsigned short num_conns; /* number of connections in this bundle */
u16 service_id; /* Service ID for this bundle */
u8 security_ix; /* security type */
};

/*
* Keys for matching a connection.
*/
@@ -295,17 +276,21 @@ struct rxrpc_conn_parameters {
*/
struct rxrpc_connection {
struct rxrpc_transport *trans; /* transport session */
struct rxrpc_conn_bundle *bundle; /* connection bundle (client) */
struct rxrpc_conn_proto proto;
struct rxrpc_conn_parameters params;

spinlock_t channel_lock;
struct rxrpc_call *channels[RXRPC_MAXCALLS]; /* active calls */
wait_queue_head_t channel_wq; /* queue to wait for channel to become available */

struct work_struct processor; /* connection event processor */
struct rb_node node; /* node in transport's lookup tree */
union {
struct rb_node client_node; /* Node in local->client_conns */
struct rb_node service_node; /* Node in trans->server_conns */
};
struct list_head link; /* link in master connection list */
struct list_head bundle_link; /* link in bundle */
struct rb_root calls; /* calls on this connection */
struct sk_buff_head rx_queue; /* received conn-level packets */
struct rxrpc_call *channels[RXRPC_MAXCALLS]; /* channels (active calls) */
const struct rxrpc_security *security; /* applied security module */
struct key *server_key; /* security for this service */
struct crypto_skcipher *cipher; /* encryption handle */
@@ -314,7 +299,7 @@ struct rxrpc_connection {
#define RXRPC_CONN_HAS_IDR 0 /* - Has a client conn ID assigned */
unsigned long events;
#define RXRPC_CONN_CHALLENGE 0 /* send challenge packet */
unsigned long put_time; /* time at which to reap */
unsigned long put_time; /* Time at which last put */
rwlock_t lock; /* access lock */
spinlock_t state_lock; /* state-change lock */
atomic_t usage;
@@ -335,7 +320,7 @@ struct rxrpc_connection {
unsigned int call_counter; /* call ID counter */
atomic_t serial; /* packet serial number counter */
atomic_t hi_serial; /* highest serial number received */
u8 avail_calls; /* number of calls available */
atomic_t avail_chans; /* number of channels available */
u8 size_align; /* data size alignment (for security) */
u8 header_size; /* rxrpc + security header size */
u8 security_size; /* security header size */
@@ -386,6 +371,8 @@ enum rxrpc_call_event {
* The states that a call can be in.
*/
enum rxrpc_call_state {
RXRPC_CALL_UNINITIALISED,
RXRPC_CALL_CLIENT_AWAIT_CONN, /* - client waiting for connection to become available */
RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */
RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */
@@ -540,7 +527,7 @@ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
struct rxrpc_conn_parameters *,
struct rxrpc_transport *,
struct rxrpc_conn_bundle *,
struct sockaddr_rxrpc *,
unsigned long, gfp_t);
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
struct rxrpc_connection *,
@@ -555,8 +542,7 @@ void __exit rxrpc_destroy_all_calls(void);
*/
extern struct idr rxrpc_client_conn_ids;

int rxrpc_get_client_connection_id(struct rxrpc_connection *,
struct rxrpc_transport *, gfp_t);
int rxrpc_get_client_connection_id(struct rxrpc_connection *, gfp_t);
void rxrpc_put_client_connection_id(struct rxrpc_connection *);

/*
@@ -573,13 +559,10 @@ extern unsigned int rxrpc_connection_expiry;
extern struct list_head rxrpc_connections;
extern rwlock_t rxrpc_connection_lock;

struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
struct rxrpc_transport *,
struct key *, u16, gfp_t);
void rxrpc_put_bundle(struct rxrpc_transport *, struct rxrpc_conn_bundle *);
int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_conn_parameters *,
struct rxrpc_transport *, struct rxrpc_conn_bundle *,
struct rxrpc_call *, gfp_t);
int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
struct rxrpc_transport *,
struct sockaddr_rxrpc *, gfp_t);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_put_connection(struct rxrpc_connection *);
void __exit rxrpc_destroy_all_connections(void);
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
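
In the struct rxrpc_connection changes above, the bundle's three connection lists are replaced by per-connection channel management: channel_lock, the channels[] array, a channel_wq wait queue and an avail_chans counter. As a rough userspace analogue (not the patch's actual locking; pthread primitives stand in for the kernel spinlock and wait queue, and all names are invented for the sketch), claiming and releasing a channel with such fields could look like this:

#include <pthread.h>

#define RXRPC_MAXCALLS 4

struct example_call { int id; };

struct example_conn {
	pthread_mutex_t channel_lock;	/* stands in for conn->channel_lock */
	pthread_cond_t channel_wq;	/* stands in for conn->channel_wq */
	int avail_chans;		/* stands in for conn->avail_chans */
	struct example_call *channels[RXRPC_MAXCALLS];
};

static struct example_conn conn = {
	.channel_lock	= PTHREAD_MUTEX_INITIALIZER,
	.channel_wq	= PTHREAD_COND_INITIALIZER,
	.avail_chans	= RXRPC_MAXCALLS,
};

/* Block until a channel is free, then bind the call to it. */
static int example_claim_channel(struct example_conn *c, struct example_call *call)
{
	int i;

	pthread_mutex_lock(&c->channel_lock);
	while (c->avail_chans == 0)
		pthread_cond_wait(&c->channel_wq, &c->channel_lock);

	for (i = 0; i < RXRPC_MAXCALLS; i++) {
		if (!c->channels[i]) {
			c->channels[i] = call;
			c->avail_chans--;
			break;
		}
	}
	pthread_mutex_unlock(&c->channel_lock);
	return i;
}

/* Free the channel and wake one waiter, as channel_wq is meant to do. */
static void example_release_channel(struct example_conn *c, int i)
{
	pthread_mutex_lock(&c->channel_lock);
	c->channels[i] = NULL;
	c->avail_chans++;
	pthread_cond_signal(&c->channel_wq);
	pthread_mutex_unlock(&c->channel_lock);
}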
124 changes: 65 additions & 59 deletions net/rxrpc/call_object.c
@@ -31,6 +31,8 @@ unsigned int rxrpc_max_call_lifetime = 60 * HZ;
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
[RXRPC_CALL_UNINITIALISED] = "Uninit",
[RXRPC_CALL_CLIENT_AWAIT_CONN] = "ClWtConn",
[RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq",
[RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl",
[RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl",
@@ -261,6 +263,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
(unsigned long) call);
INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
INIT_WORK(&call->processor, &rxrpc_process_call);
INIT_LIST_HEAD(&call->link);
INIT_LIST_HEAD(&call->accept_link);
skb_queue_head_init(&call->rx_queue);
skb_queue_head_init(&call->rx_oos_queue);
@@ -269,7 +272,6 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
rwlock_init(&call->state_lock);
atomic_set(&call->usage, 1);
call->debug_id = atomic_inc_return(&rxrpc_debug_id);
call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

memset(&call->sock_node, 0xed, sizeof(call->sock_node));

@@ -282,55 +284,70 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
}

/*
* allocate a new client call and attempt to get a connection slot for it
* Allocate a new client call.
*/
static struct rxrpc_call *rxrpc_alloc_client_call(
struct rxrpc_sock *rx,
struct rxrpc_conn_parameters *cp,
struct rxrpc_transport *trans,
struct rxrpc_conn_bundle *bundle,
struct sockaddr_rxrpc *srx,
gfp_t gfp)
{
struct rxrpc_call *call;
int ret;

_enter("");

ASSERT(rx != NULL);
ASSERT(trans != NULL);
ASSERT(bundle != NULL);
ASSERT(rx->local != NULL);

call = rxrpc_alloc_call(gfp);
if (!call)
return ERR_PTR(-ENOMEM);
call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;

sock_hold(&rx->sk);
call->socket = rx;
call->rx_data_post = 1;

ret = rxrpc_connect_call(rx, cp, trans, bundle, call, gfp);
if (ret < 0) {
kmem_cache_free(rxrpc_call_jar, call);
return ERR_PTR(ret);
}

/* Record copies of information for hashtable lookup */
call->family = rx->family;
call->local = call->conn->params.local;
call->local = rx->local;
switch (call->family) {
case AF_INET:
call->peer_ip.ipv4_addr =
call->conn->params.peer->srx.transport.sin.sin_addr.s_addr;
call->peer_ip.ipv4_addr = srx->transport.sin.sin_addr.s_addr;
break;
case AF_INET6:
memcpy(call->peer_ip.ipv6_addr,
call->conn->params.peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
srx->transport.sin6.sin6_addr.in6_u.u6_addr8,
sizeof(call->peer_ip.ipv6_addr));
break;
}
call->epoch = call->conn->proto.epoch;
call->service_id = call->conn->params.service_id;
call->in_clientflag = call->conn->proto.in_clientflag;

call->service_id = srx->srx_service;
call->in_clientflag = 0;

_leave(" = %p", call);
return call;
}

/*
* Begin client call.
*/
static int rxrpc_begin_client_call(struct rxrpc_call *call,
struct rxrpc_conn_parameters *cp,
struct rxrpc_transport *trans,
struct sockaddr_rxrpc *srx,
gfp_t gfp)
{
int ret;

/* Set up or get a connection record and set the protocol parameters,
* including channel number and call ID.
*/
ret = rxrpc_connect_call(call, cp, trans, srx, gfp);
if (ret < 0)
return ret;

call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

/* Add the new call to the hashtable */
rxrpc_call_hash_add(call);

@@ -340,9 +357,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(

call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
add_timer(&call->lifetimer);

_leave(" = %p", call);
return call;
return 0;
}

/*
@@ -352,23 +367,23 @@ static struct rxrpc_call *rxrpc_alloc_client_call(
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
struct rxrpc_conn_parameters *cp,
struct rxrpc_transport *trans,
struct rxrpc_conn_bundle *bundle,
struct sockaddr_rxrpc *srx,
unsigned long user_call_ID,
gfp_t gfp)
{
struct rxrpc_call *call, *xcall;
struct rb_node *parent, **pp;
int ret;

_enter("%p,%d,%d,%lx",
rx, trans->debug_id, bundle ? bundle->debug_id : -1,
user_call_ID);
_enter("%p,%lx", rx, user_call_ID);

call = rxrpc_alloc_client_call(rx, cp, trans, bundle, gfp);
call = rxrpc_alloc_client_call(rx, cp, srx, gfp);
if (IS_ERR(call)) {
_leave(" = %ld", PTR_ERR(call));
return call;
}

/* Publish the call, even though it is incompletely set up as yet */
call->user_call_ID = user_call_ID;
__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

@@ -398,11 +413,29 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
list_add_tail(&call->link, &rxrpc_calls);
write_unlock_bh(&rxrpc_call_lock);

ret = rxrpc_begin_client_call(call, cp, trans, srx, gfp);
if (ret < 0)
goto error;

_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

_leave(" = %p [new]", call);
return call;

error:
write_lock(&rx->call_lock);
rb_erase(&call->sock_node, &rx->calls);
write_unlock(&rx->call_lock);
rxrpc_put_call(call);

write_lock_bh(&rxrpc_call_lock);
list_del(&call->link);
write_unlock_bh(&rxrpc_call_lock);

rxrpc_put_call(call);
_leave(" = %d", ret);
return ERR_PTR(ret);

/* We unexpectedly found the user ID in the list after taking
* the call_lock. This shouldn't happen unless the user races
* with itself and tries to add the same user ID twice at the
@@ -612,40 +645,13 @@ void rxrpc_release_call(struct rxrpc_call *call)
write_unlock_bh(&rx->call_lock);

/* free up the channel for reuse */
spin_lock(&conn->trans->client_lock);
spin_lock(&conn->channel_lock);
write_lock_bh(&conn->lock);
write_lock(&call->state_lock);

if (conn->channels[call->channel] == call)
conn->channels[call->channel] = NULL;

if (conn->out_clientflag && conn->bundle) {
conn->avail_calls++;
switch (conn->avail_calls) {
case 1:
list_move_tail(&conn->bundle_link,
&conn->bundle->avail_conns);
case 2 ... RXRPC_MAXCALLS - 1:
ASSERT(conn->channels[0] == NULL ||
conn->channels[1] == NULL ||
conn->channels[2] == NULL ||
conn->channels[3] == NULL);
break;
case RXRPC_MAXCALLS:
list_move_tail(&conn->bundle_link,
&conn->bundle->unused_conns);
ASSERT(conn->channels[0] == NULL &&
conn->channels[1] == NULL &&
conn->channels[2] == NULL &&
conn->channels[3] == NULL);
break;
default:
pr_err("conn->avail_calls=%d\n", conn->avail_calls);
BUG();
}
}
rxrpc_disconnect_call(call);

spin_unlock(&conn->trans->client_lock);
spin_unlock(&conn->channel_lock);

if (call->state < RXRPC_CALL_COMPLETE &&
call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
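
The call_object.c changes above split client-call setup into two steps: rxrpc_alloc_client_call() now leaves the call in RXRPC_CALL_CLIENT_AWAIT_CONN, and rxrpc_begin_client_call() only advances it to RXRPC_CALL_CLIENT_SEND_REQUEST once rxrpc_connect_call() has attached a connection. A condensed sketch of that ordering follows; it paraphrases the diff with made-up example_ names rather than reproducing the kernel code.

enum example_call_state {
	EXAMPLE_CALL_UNINITIALISED,
	EXAMPLE_CALL_CLIENT_AWAIT_CONN,
	EXAMPLE_CALL_CLIENT_SEND_REQUEST,
};

struct example_call {
	enum example_call_state state;
};

/* Stand-in for rxrpc_connect_call(); returns 0 on success. */
static int example_connect_call(struct example_call *call)
{
	(void)call;
	return 0;
}

static int example_new_client_call(struct example_call *call)
{
	/* The call is published (user ID set, added to the socket's tree)
	 * while it is still waiting for a connection. */
	call->state = EXAMPLE_CALL_CLIENT_AWAIT_CONN;

	if (example_connect_call(call) < 0) {
		/* Mirrors the new error: path, which unpublishes and
		 * drops the call before returning the error. */
		return -1;
	}

	/* Only now is the call ready to send its request. */
	call->state = EXAMPLE_CALL_CLIENT_SEND_REQUEST;
	return 0;
}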
Diffs for the remaining five changed files are not shown.
