Merge branch 'tls-cleanups'
Sabrina Dubroca says:

====================
net: tls: various code cleanups and improvements

This series contains multiple cleanups and simplifications for the
config code of both TLS_SW and TLS_HW.

It also modifies the chcr_ktls driver to use driver_state like all
other drivers, so that we can then make driver_state fixed size
instead of a flex array always allocated to that same fixed size. As
reported by Gustavo A. R. Silva, the way chcr_ktls misuses
driver_state irritates GCC [1].

Patches 1 and 2 are follow-ups to my previous cipher_desc series.

[1] https://lore.kernel.org/netdev/ZRvzdlvlbX4+eIln@work/
====================

Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
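
For context, the core of the series is the driver_state change visible in the include/net/tls.h diff below: the TLS core reserves a fixed-size, 8-byte-aligned scratch area in its offload contexts, and each driver overlays its own private struct on top of it instead of wrapping the whole context. The standalone C sketch below illustrates only that pattern; it is not kernel code, and the toy_* names are invented for illustration.

/* Minimal userspace sketch of the fixed-size driver_state pattern.
 * Names (toy_offload_ctx_tx, toy_driver_priv) are illustrative only.
 */
#include <stdio.h>
#include <string.h>

#define DRIVER_STATE_SIZE_TX 16

struct toy_offload_ctx_tx {
	unsigned long long record_no;	/* core state (elided) */
	/* fixed-size scratch area instead of a flexible array member */
	unsigned char driver_state[DRIVER_STATE_SIZE_TX] __attribute__((aligned(8)));
};

struct toy_driver_priv {
	void *hw_info;			/* driver's per-connection pointer */
};

/* the driver overlay must fit in the reserved area; compile-time check,
 * analogous to the BUILD_BUG_ON() in the chcr_ktls helpers below */
_Static_assert(sizeof(struct toy_driver_priv) <= DRIVER_STATE_SIZE_TX,
	       "driver state too large");

static struct toy_driver_priv *toy_driver_ctx(struct toy_offload_ctx_tx *octx)
{
	return (struct toy_driver_priv *)octx->driver_state;
}

int main(void)
{
	struct toy_offload_ctx_tx octx;
	int hw_token = 42;

	memset(&octx, 0, sizeof(octx));
	toy_driver_ctx(&octx)->hw_info = &hw_token;	/* "dev_add" path */
	printf("driver state set: %d\n", toy_driver_ctx(&octx)->hw_info == &hw_token);
	toy_driver_ctx(&octx)->hw_info = NULL;		/* "dev_del" path */
	return 0;
}

The chcr_ktls helpers added in chcr_ktls.h below do the same thing in-tree, via __tls_driver_ctx() and a BUILD_BUG_ON() size check.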
David S. Miller committed Oct 13, 2023
2 parents 895359b + 9f0c824 commit 35715ac
Showing 8 changed files with 244 additions and 252 deletions.
43 changes: 19 additions & 24 deletions drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -361,9 +361,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
 			      struct tls_context *tls_ctx,
 			      enum tls_offload_ctx_dir direction)
 {
-	struct chcr_ktls_ofld_ctx_tx *tx_ctx =
-				chcr_get_ktls_tx_context(tls_ctx);
-	struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
+	struct chcr_ktls_info *tx_info = chcr_get_ktls_tx_info(tls_ctx);
 	struct ch_ktls_port_stats_debug *port_stats;
 	struct chcr_ktls_uld_ctx *u_ctx;
 
@@ -396,7 +394,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
 	port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
 	atomic64_inc(&port_stats->ktls_tx_connection_close);
 	kvfree(tx_info);
-	tx_ctx->chcr_info = NULL;
+	chcr_set_ktls_tx_info(tls_ctx, NULL);
 	/* release module refcount */
 	module_put(THIS_MODULE);
 }
@@ -417,7 +415,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct ch_ktls_port_stats_debug *port_stats;
-	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
 	struct chcr_ktls_uld_ctx *u_ctx;
 	struct chcr_ktls_info *tx_info;
 	struct dst_entry *dst;
@@ -427,8 +424,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 	u8 daaddr[16];
 	int ret = -1;
 
-	tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
-
 	pi = netdev_priv(netdev);
 	adap = pi->adapter;
 	port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
@@ -440,7 +435,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 		goto out;
 	}
 
-	if (tx_ctx->chcr_info)
+	if (chcr_get_ktls_tx_info(tls_ctx))
 		goto out;
 
 	if (u_ctx && u_ctx->detach)
@@ -566,7 +561,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 		goto free_tid;
 
 	atomic64_inc(&port_stats->ktls_tx_ctx);
-	tx_ctx->chcr_info = tx_info;
+	chcr_set_ktls_tx_info(tls_ctx, tx_info);
 
 	return 0;
 
@@ -647,7 +642,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
 {
 	const struct cpl_act_open_rpl *p = (void *)input;
 	struct chcr_ktls_info *tx_info = NULL;
-	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+	struct tls_offload_context_tx *tx_ctx;
 	struct chcr_ktls_uld_ctx *u_ctx;
 	unsigned int atid, tid, status;
 	struct tls_context *tls_ctx;
@@ -686,7 +681,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
 	cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
 	/* Adding tid */
 	tls_ctx = tls_get_ctx(tx_info->sk);
-	tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+	tx_ctx = tls_offload_ctx_tx(tls_ctx);
 	u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
 	if (u_ctx) {
 		ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx,
@@ -1924,7 +1919,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
 	struct ch_ktls_port_stats_debug *port_stats;
-	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+	struct tls_offload_context_tx *tx_ctx;
 	struct ch_ktls_stats_debug *stats;
 	struct tcphdr *th = tcp_hdr(skb);
 	int data_len, qidx, ret = 0, mss;
@@ -1944,6 +1939,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
 
 	tls_ctx = tls_get_ctx(skb->sk);
+	tx_ctx = tls_offload_ctx_tx(tls_ctx);
 	tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
 	/* Don't quit on NULL: if tls_device_down is running in parallel,
 	 * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was
@@ -1952,8 +1948,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(tls_netdev && tls_netdev != dev))
 		goto out;
 
-	tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
-	tx_info = tx_ctx->chcr_info;
+	tx_info = chcr_get_ktls_tx_info(tls_ctx);
 
 	if (unlikely(!tx_info))
 		goto out;
@@ -1979,19 +1974,19 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * we will send the complete record again.
 	 */
 
-	spin_lock_irqsave(&tx_ctx->base.lock, flags);
+	spin_lock_irqsave(&tx_ctx->lock, flags);
 
 	do {
 
 		cxgb4_reclaim_completed_tx(adap, &q->q, true);
 		/* fetch the tls record */
-		record = tls_get_record(&tx_ctx->base, tcp_seq,
+		record = tls_get_record(tx_ctx, tcp_seq,
 					&tx_info->record_no);
 		/* By the time packet reached to us, ACK is received, and record
 		 * won't be found in that case, handle it gracefully.
 		 */
 		if (unlikely(!record)) {
-			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+			spin_unlock_irqrestore(&tx_ctx->lock, flags);
 			atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data);
 			goto out;
 		}
@@ -2015,7 +2010,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 							    tls_end_offset !=
 							    record->len);
 			if (ret) {
-				spin_unlock_irqrestore(&tx_ctx->base.lock,
+				spin_unlock_irqrestore(&tx_ctx->lock,
 						       flags);
 				goto out;
 			}
@@ -2046,7 +2041,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 			/* free the refcount taken earlier */
 			if (tls_end_offset < data_len)
 				dev_kfree_skb_any(skb);
-			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+			spin_unlock_irqrestore(&tx_ctx->lock, flags);
 			goto out;
 		}
 
@@ -2082,7 +2077,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		/* if any failure, come out from the loop. */
 		if (ret) {
-			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+			spin_unlock_irqrestore(&tx_ctx->lock, flags);
 			if (th->fin)
 				dev_kfree_skb_any(skb);
 
@@ -2097,7 +2092,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	} while (data_len > 0);
 
-	spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+	spin_unlock_irqrestore(&tx_ctx->lock, flags);
 	atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
 	atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
 
@@ -2185,17 +2180,17 @@ static void clear_conn_resources(struct chcr_ktls_info *tx_info)
 static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx)
 {
 	struct ch_ktls_port_stats_debug *port_stats;
-	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+	struct tls_offload_context_tx *tx_ctx;
 	struct chcr_ktls_info *tx_info;
 	unsigned long index;
 
 	xa_for_each(&u_ctx->tid_list, index, tx_ctx) {
-		tx_info = tx_ctx->chcr_info;
+		tx_info = __chcr_get_ktls_tx_info(tx_ctx);
 		clear_conn_resources(tx_info);
 		port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
 		atomic64_inc(&port_stats->ktls_tx_connection_close);
 		kvfree(tx_info);
-		tx_ctx->chcr_info = NULL;
+		memset(tx_ctx->driver_state, 0, TLS_DRIVER_STATE_SIZE_TX);
 		/* release module refcount */
 		module_put(THIS_MODULE);
 	}
36 changes: 27 additions & 9 deletions drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
@@ -67,8 +67,7 @@ struct chcr_ktls_info {
 	bool pending_close;
 };
 
-struct chcr_ktls_ofld_ctx_tx {
-	struct tls_offload_context_tx base;
+struct chcr_ktls_ctx_tx {
 	struct chcr_ktls_info *chcr_info;
 };
 
@@ -79,14 +78,33 @@ struct chcr_ktls_uld_ctx {
 	bool detach;
 };
 
-static inline struct chcr_ktls_ofld_ctx_tx *
-chcr_get_ktls_tx_context(struct tls_context *tls_ctx)
+static inline struct chcr_ktls_info *
+__chcr_get_ktls_tx_info(struct tls_offload_context_tx *octx)
 {
-	BUILD_BUG_ON(sizeof(struct chcr_ktls_ofld_ctx_tx) >
-		     TLS_OFFLOAD_CONTEXT_SIZE_TX);
-	return container_of(tls_offload_ctx_tx(tls_ctx),
-			    struct chcr_ktls_ofld_ctx_tx,
-			    base);
+	struct chcr_ktls_ctx_tx *priv_ctx;
+
+	BUILD_BUG_ON(sizeof(struct chcr_ktls_ctx_tx) > TLS_DRIVER_STATE_SIZE_TX);
+	priv_ctx = (struct chcr_ktls_ctx_tx *)octx->driver_state;
+	return priv_ctx->chcr_info;
+}
+
+static inline struct chcr_ktls_info *
+chcr_get_ktls_tx_info(struct tls_context *tls_ctx)
+{
+	struct chcr_ktls_ctx_tx *priv_ctx;
+
+	BUILD_BUG_ON(sizeof(struct chcr_ktls_ctx_tx) > TLS_DRIVER_STATE_SIZE_TX);
+	priv_ctx = (struct chcr_ktls_ctx_tx *)__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
+	return priv_ctx->chcr_info;
+}
+
+static inline void
+chcr_set_ktls_tx_info(struct tls_context *tls_ctx, struct chcr_ktls_info *chcr_info)
+{
+	struct chcr_ktls_ctx_tx *priv_ctx;
+
+	priv_ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
+	priv_ctx->chcr_info = chcr_info;
 }
 
 static inline int chcr_get_first_rx_qid(struct adapter *adap)
21 changes: 8 additions & 13 deletions include/net/tls.h
@@ -61,7 +61,8 @@ struct tls_rec;
 
 #define TLS_AAD_SPACE_SIZE 13
 
-#define MAX_IV_SIZE 16
+#define TLS_MAX_IV_SIZE 16
+#define TLS_MAX_SALT_SIZE 4
 #define TLS_TAG_SIZE 16
 #define TLS_MAX_REC_SEQ_SIZE 8
 #define TLS_MAX_AAD_SIZE TLS_AAD_SPACE_SIZE
@@ -149,6 +150,7 @@ struct tls_record_info {
 	skb_frag_t frags[MAX_SKB_FRAGS];
 };
 
+#define TLS_DRIVER_STATE_SIZE_TX 16
 struct tls_offload_context_tx {
 	struct crypto_aead *aead_send;
 	spinlock_t lock; /* protects records list */
@@ -162,17 +164,13 @@ struct tls_offload_context_tx {
 	void (*sk_destruct)(struct sock *sk);
 	struct work_struct destruct_work;
 	struct tls_context *ctx;
-	u8 driver_state[] __aligned(8);
 	/* The TLS layer reserves room for driver specific state
 	 * Currently the belief is that there is not enough
 	 * driver specific state to justify another layer of indirection
 	 */
-#define TLS_DRIVER_STATE_SIZE_TX 16
+	u8 driver_state[TLS_DRIVER_STATE_SIZE_TX] __aligned(8);
 };
 
-#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
-	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
-
 enum tls_context_flags {
 	/* tls_device_down was called after the netdev went down, device state
 	 * was released, and kTLS works in software, even though rx_conf is
Expand All @@ -193,8 +191,8 @@ enum tls_context_flags {
};

struct cipher_context {
char *iv;
char *rec_seq;
char iv[TLS_MAX_IV_SIZE + TLS_MAX_SALT_SIZE];
char rec_seq[TLS_MAX_REC_SEQ_SIZE];
};

union tls_crypto_context {
@@ -302,6 +300,7 @@ struct tls_offload_resync_async {
 	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
 };
 
+#define TLS_DRIVER_STATE_SIZE_RX 8
 struct tls_offload_context_rx {
 	/* sw must be the first member of tls_offload_context_rx */
 	struct tls_sw_context_rx sw;
@@ -325,17 +324,13 @@ struct tls_offload_context_rx {
 			struct tls_offload_resync_async *resync_async;
 		};
 	};
-	u8 driver_state[] __aligned(8);
 	/* The TLS layer reserves room for driver specific state
 	 * Currently the belief is that there is not enough
 	 * driver specific state to justify another layer of indirection
 	 */
-#define TLS_DRIVER_STATE_SIZE_RX 8
+	u8 driver_state[TLS_DRIVER_STATE_SIZE_RX] __aligned(8);
 };
 
-#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
-	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
-
 struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
 				       u32 seq, u64 *p_record_sn);
 
12 changes: 8 additions & 4 deletions net/tls/tls.h
@@ -127,7 +127,7 @@ struct tls_rec {
 	struct sock *sk;
 
 	char aad_space[TLS_AAD_SPACE_SIZE];
-	u8 iv_data[MAX_IV_SIZE];
+	u8 iv_data[TLS_MAX_IV_SIZE];
 	struct aead_request aead_req;
 	u8 aead_req_ctx[];
 };
@@ -142,7 +142,11 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx);
 int wait_on_pending_writer(struct sock *sk, long *timeo);
 void tls_err_abort(struct sock *sk, int err);
 
-int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
+int init_prot_info(struct tls_prot_info *prot,
+		   const struct tls_crypto_info *crypto_info,
+		   const struct tls_cipher_desc *cipher_desc,
+		   int mode);
+int tls_set_sw_offload(struct sock *sk, int tx);
 void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
 void tls_sw_strparser_done(struct tls_context *tls_ctx);
@@ -223,7 +227,7 @@ static inline bool tls_strp_msg_mixed_decrypted(struct tls_sw_context_rx *ctx)
 #ifdef CONFIG_TLS_DEVICE
 int tls_device_init(void);
 void tls_device_cleanup(void);
-int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
+int tls_set_device_offload(struct sock *sk);
 void tls_device_free_resources_tx(struct sock *sk);
 int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
 void tls_device_offload_cleanup_rx(struct sock *sk);
@@ -234,7 +238,7 @@ static inline int tls_device_init(void) { return 0; }
 static inline void tls_device_cleanup(void) {}
 
 static inline int
-tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
+tls_set_device_offload(struct sock *sk)
 {
 	return -EOPNOTSUPP;
 }
[diffs for the remaining four changed files did not load and are not shown]
