Merge branch 'net-tls-small-general-improvements'
Jakub Kicinski says:

====================
net/tls: small general improvements

This series cleans up and improves the tls code, mostly the offload
parts.

First, a slight performance optimization - avoiding unnecessary
re-encryption of records in patch 1.  Patch 2 makes the code
more resilient by checking for errors in skb_copy_bits().  The next
commit removes a warning which can be triggered in normal operation
(especially for devices explicitly making use of the fallback path).
The next two patches change the condition checking around the call to
tls_device_decrypted() to make it easier to extend.  The remaining
commits are centered around reorganizing struct tls_context for
better cache utilization.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Jun 4, 2019
2 parents 7e7d199 + fb0f886 commit b20ac8d
Showing 8 changed files with 75 additions and 65 deletions.
19 changes: 0 additions & 19 deletions Documentation/networking/tls-offload.rst
@@ -379,7 +379,6 @@ by the driver:
but did not arrive in the expected order
* ``tx_tls_drop_no_sync_data`` - number of TX packets dropped because
they arrived out of order and associated record could not be found
(see also :ref:`pre_tls_data`)

Notable corner cases, exceptions and additional requirements
============================================================
@@ -462,21 +461,3 @@ Redirects leak clear text

In the RX direction, if a segment has already been decrypted by the device
and it gets redirected or mirrored, clear text will be transmitted out.

.. _pre_tls_data:

Transmission of pre-TLS data
----------------------------

User can enqueue some already encrypted and framed records before enabling
``ktls`` on the socket. Those records have to be sent as they are. This is
perfectly easy to handle in the software case - such data will be waiting
in the TCP layer, and the TLS ULP won't see it. In the offloaded case, when
a pre-queued segment reaches the transmission point it appears to be out of
order (before the expected TCP sequence number) and the stack does not have
any record information associated with it.

Not all segments without record information can, however, be assumed to be
pre-queued data, because a race condition exists between the TCP stack
queuing a retransmission, the driver seeing the retransmission, and the
TCP ACK arriving for the retransmitted data.
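
For context, the scenario the removed section described arises from the
normal kTLS setup sequence: user space may transmit records it encrypted and
framed itself (for example, the tail of the handshake) before installing the
kernel crypto state. A minimal userspace sketch of that sequence - purely
illustrative, not part of this commit, with error handling trimmed and the
SOL_TLS/TCP_ULP constants defined as a guard for older headers:

#include <string.h>
#include <sys/socket.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef TCP_ULP
#define TCP_ULP 31
#endif
#ifndef SOL_TLS
#define SOL_TLS 282
#endif

/* Queue a record that was encrypted and framed in user space, then
 * switch the socket to kTLS.  The kernel must transmit the pre-queued
 * bytes unmodified; on an offloaded socket they later surface at the
 * transmission point as segments with no record state attached.
 */
static int start_ktls_tx(int sk, const void *rec, size_t rec_len,
			 const struct tls12_crypto_info_aes_gcm_128 *ci)
{
	if (send(sk, rec, rec_len, 0) < 0)
		return -1;
	if (setsockopt(sk, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	return setsockopt(sk, SOL_TLS, TLS_TX, ci, sizeof(*ci));
}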
1 change: 1 addition & 0 deletions include/linux/skbuff.h
@@ -1063,6 +1063,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
int max_page_order,
int *errcode,
gfp_t gfp_mask);
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);

/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
36 changes: 18 additions & 18 deletions include/net/tls.h
@@ -236,34 +236,32 @@ struct tls_prot_info {
};

struct tls_context {
/* read-only cache line */
struct tls_prot_info prot_info;

union tls_crypto_context crypto_send;
union tls_crypto_context crypto_recv;
u8 tx_conf:3;
u8 rx_conf:3;

struct list_head list;
struct net_device *netdev;
refcount_t refcount;
int (*push_pending_record)(struct sock *sk, int flags);
void (*sk_write_space)(struct sock *sk);

void *priv_ctx_tx;
void *priv_ctx_rx;

u8 tx_conf:3;
u8 rx_conf:3;
struct net_device *netdev;

/* rw cache line */
struct cipher_context tx;
struct cipher_context rx;

struct scatterlist *partially_sent_record;
u16 partially_sent_offset;

unsigned long flags;
bool in_tcp_sendpages;
bool pending_open_record_frags;
unsigned long flags;

int (*push_pending_record)(struct sock *sk, int flags);

void (*sk_write_space)(struct sock *sk);
/* cache cold stuff */
void (*sk_destruct)(struct sock *sk);
void (*sk_proto_close)(struct sock *sk, long timeout);

@@ -275,6 +273,12 @@ struct tls_context {
int __user *optlen);
int (*hash)(struct sock *sk);
void (*unhash)(struct sock *sk);

union tls_crypto_context crypto_send;
union tls_crypto_context crypto_recv;

struct list_head list;
refcount_t refcount;
};
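
The hot/cold grouping above can be sanity-checked at build time. A
self-contained sketch of the idea (hypothetical field names, not the real
tls_context layout):

#include <stddef.h>

/* Read-mostly fields first, per-record read-write state next,
 * setup/teardown callbacks last.  The assert proves the read-mostly
 * prefix fits in one 64-byte cache line, so the per-record fast path
 * reads a single clean line.
 */
struct layout_demo {
	/* read-mostly on the fast path */
	unsigned int prepend_size;
	unsigned int tag_size;
	unsigned int iv_size;
	unsigned char conf_bits;

	/* read-write on the fast path */
	unsigned char rec_seq[8];
	unsigned char iv[20];

	/* cache cold: setup and teardown only */
	void (*destruct)(void *sk);
};

_Static_assert(offsetof(struct layout_demo, rec_seq) <= 64,
	       "read-mostly prefix must fit in the first cache line");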

enum tls_offload_ctx_dir {
@@ -442,19 +446,15 @@ static inline struct tls_context *tls_get_ctx(const struct sock *sk)
}

static inline void tls_advance_record_sn(struct sock *sk,
struct cipher_context *ctx,
int version)
struct tls_prot_info *prot,
struct cipher_context *ctx)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;

if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
tls_err_abort(sk, EBADMSG);

if (version != TLS_1_3_VERSION) {
if (prot->version != TLS_1_3_VERSION)
tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
prot->iv_size);
}
}

static inline void tls_fill_prepend(struct tls_context *ctx,
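
For reference, the tls_bigint_increment() helper used by
tls_advance_record_sn() above (already present in this header; shown here,
lightly paraphrased, to make the overflow check readable) treats the record
sequence number as a big-endian integer and reports a full wrap-around,
which tls_advance_record_sn() turns into EBADMSG:

/* Increment a big-endian byte string; returns true only if every byte
 * wrapped, i.e. the counter overflowed.
 */
static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	return i == -1;
}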
25 changes: 25 additions & 0 deletions net/core/skbuff.c
@@ -913,6 +913,31 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
#undef C
}

/**
* alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
* @first: first sk_buff of the msg
*/
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
{
struct sk_buff *n;

n = alloc_skb(0, GFP_ATOMIC);
if (!n)
return NULL;

n->len = first->len;
n->data_len = first->len;
n->truesize = first->truesize;

skb_shinfo(n)->frag_list = first;

__copy_skb_header(n, first);
n->destructor = NULL;

return n;
}
EXPORT_SYMBOL_GPL(alloc_skb_for_msg);

/**
* skb_morph - morph one skb into another
* @dst: the skb to receive the contents
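
A minimal caller-side sketch of the new helper (illustrative only; the
strparser conversion below is the actual in-tree user):

/* Illustrative caller (hypothetical): hand a complete message chain
 * upward behind a single zero-data skb.
 */
static struct sk_buff *wrap_msg(struct sk_buff *head)
{
	struct sk_buff *msg = alloc_skb_for_msg(head);

	/* on failure the caller still owns `head` */
	if (!msg)
		return NULL;

	/* `head` now hangs off skb_shinfo(msg)->frag_list, and msg's
	 * len/data_len/truesize already account for it
	 */
	return msg;
}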
8 changes: 2 additions & 6 deletions net/strparser/strparser.c
@@ -160,18 +160,14 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
return 0;
}

skb = alloc_skb(0, GFP_ATOMIC);
skb = alloc_skb_for_msg(head);
if (!skb) {
STRP_STATS_INCR(strp->stats.mem_fail);
desc->error = -ENOMEM;
return 0;
}
skb->len = head->len;
skb->data_len = head->len;
skb->truesize = head->truesize;
*_strp_msg(skb) = *_strp_msg(head);

strp->skb_nextp = &head->next;
skb_shinfo(skb)->frag_list = head;
strp->skb_head = skb;
head = skb;
} else {
28 changes: 16 additions & 12 deletions net/tls/tls_device.c
@@ -252,7 +252,7 @@ static int tls_push_record(struct sock *sk,
skb_frag_address(frag),
record->len - prot->prepend_size,
record_type,
ctx->crypto_send.info.version);
prot->version);

/* HW doesn't care about the data in the tag, because it fills it. */
dummy_tag_frag.page = skb_frag_page(frag);
@@ -264,7 +264,7 @@
list_add_tail(&record->list, &offload_ctx->records_list);
spin_unlock_irq(&offload_ctx->lock);
offload_ctx->open_record = NULL;
tls_advance_record_sn(sk, &ctx->tx, ctx->crypto_send.info.version);
tls_advance_record_sn(sk, prot, &ctx->tx);

for (i = 0; i < record->num_frags; i++) {
frag = &record->frags[i];
@@ -603,8 +603,10 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
sg_set_buf(&sg[0], buf,
rxm->full_len + TLS_HEADER_SIZE +
TLS_CIPHER_AES_GCM_128_IV_SIZE);
skb_copy_bits(skb, offset, buf,
TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
err = skb_copy_bits(skb, offset, buf,
TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
if (err)
goto free_buf;

/* We are interested only in the decrypted data not the auth */
err = decrypt_skb(sk, skb, sg);
@@ -618,8 +620,11 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
if (skb_pagelen(skb) > offset) {
copy = min_t(int, skb_pagelen(skb) - offset, data_len);

if (skb->decrypted)
skb_store_bits(skb, offset, buf, copy);
if (skb->decrypted) {
err = skb_store_bits(skb, offset, buf, copy);
if (err)
goto free_buf;
}

offset += copy;
buf += copy;
@@ -642,8 +647,11 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
copy = min_t(int, skb_iter->len - frag_pos,
data_len + rxm->offset - offset);

if (skb_iter->decrypted)
skb_store_bits(skb_iter, frag_pos, buf, copy);
if (skb_iter->decrypted) {
err = skb_store_bits(skb_iter, frag_pos, buf, copy);
if (err)
goto free_buf;
}

offset += copy;
buf += copy;
@@ -664,10 +672,6 @@ int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
int is_encrypted = !is_decrypted;
struct sk_buff *skb_iter;

/* Skip if it is already decrypted */
if (ctx->sw.decrypted)
return 0;

/* Check if all the data is decrypted already */
skb_walk_frags(skb, skb_iter) {
is_decrypted &= skb_iter->decrypted;
6 changes: 4 additions & 2 deletions net/tls/tls_device_fallback.c
@@ -240,7 +240,6 @@ static int fill_sg_in(struct scatterlist *sg_in,
record = tls_get_record(ctx, tcp_seq, rcd_sn);
if (!record) {
spin_unlock_irqrestore(&ctx->lock, flags);
WARN(1, "Record not found for seq %u\n", tcp_seq);
return -EINVAL;
}

@@ -409,7 +408,10 @@ static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
put_page(sg_page(&sg_in[--resync_sgs]));
kfree(sg_in);
free_orig:
kfree_skb(skb);
if (nskb)
consume_skb(skb);
else
kfree_skb(skb);
return nskb;
}

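
The free_orig change rests on the drop-accounting distinction between the
two freeing helpers: kfree_skb() is traced as a packet drop, while
consume_skb() signals a normal, successful free. A sketch of the general
pattern, where encrypt_to_new_skb() is a hypothetical stand-in for the
fallback re-encryption:

/* hypothetical helper standing in for the fallback re-encryption */
static struct sk_buff *encrypt_to_new_skb(struct sk_buff *skb);

static struct sk_buff *replace_skb(struct sk_buff *skb)
{
	struct sk_buff *nskb = encrypt_to_new_skb(skb);

	if (nskb)
		consume_skb(skb);	/* superseded: not a drop */
	else
		kfree_skb(skb);		/* genuine failure: traced as a drop */
	return nskb;
}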
17 changes: 9 additions & 8 deletions net/tls/tls_sw.c
@@ -534,7 +534,7 @@ static int tls_do_encryption(struct sock *sk,

/* Unhook the record from context if encryption is not failure */
ctx->open_rec = NULL;
tls_advance_record_sn(sk, &tls_ctx->tx, prot->version);
tls_advance_record_sn(sk, prot, &tls_ctx->tx);
return rc;
}

@@ -1486,24 +1486,25 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
int version = prot->version;
struct strp_msg *rxm = strp_msg(skb);
int pad, err = 0;

if (!ctx->decrypted) {
#ifdef CONFIG_TLS_DEVICE
err = tls_device_decrypted(sk, skb);
if (err < 0)
return err;
if (tls_ctx->rx_conf == TLS_HW) {
err = tls_device_decrypted(sk, skb);
if (err < 0)
return err;
}
#endif
/* Still not decrypted after tls_device */
if (!ctx->decrypted) {
err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
async);
if (err < 0) {
if (err == -EINPROGRESS)
tls_advance_record_sn(sk, &tls_ctx->rx,
version);
tls_advance_record_sn(sk, prot,
&tls_ctx->rx);

return err;
}
@@ -1518,7 +1519,7 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
rxm->full_len -= pad;
rxm->offset += prot->prepend_size;
rxm->full_len -= prot->overhead_size;
tls_advance_record_sn(sk, &tls_ctx->rx, version);
tls_advance_record_sn(sk, prot, &tls_ctx->rx);
ctx->decrypted = true;
ctx->saved_data_ready(sk);
} else {
